Merge remote-tracking branch 'upstream/main' into sqlancer-test-gha

pull/6697/head
Gokhan Gulbiz 2023-02-07 10:28:42 +03:00
commit 9ffeeac516
95 changed files with 2908 additions and 2217 deletions


@@ -1,6 +1,20 @@
 package_type=${1}
-git clone -b v0.8.23 --depth=1 https://github.com/citusdata/tools.git tools
+# Since $HOME is set in GH_Actions as /github/home, pyenv fails to create virtualenvs.
+# For this script, we set $HOME to /root and then set it back to /github/home.
+GITHUB_HOME="${HOME}"
+export HOME="/root"
+
+eval "$(pyenv init -)"
+pyenv versions
+pyenv virtualenv ${PACKAGING_PYTHON_VERSION} packaging_env
+pyenv activate packaging_env
+
+git clone -b v0.8.24 --depth=1 https://github.com/citusdata/tools.git tools
 python3 -m pip install -r tools/packaging_automation/requirements.txt
 python3 -m tools.packaging_automation.validate_build_output --output_file output.log \
 	--ignore_file .github/packaging/packaging_ignore.yml \
 	--package_type ${package_type}
+
+pyenv deactivate
+
+# Set $HOME back to /github/home
+export HOME=${GITHUB_HOME}


@@ -49,14 +49,17 @@ jobs:
     container:
       image: citus/packaging:${{ matrix.packaging_docker_image }}-pg${{ matrix.POSTGRES_VERSION }}
+      options: --user root
     steps:
     - name: Checkout repository
       uses: actions/checkout@v3
-    - name: Add Postgres installation directory into PATH for rpm based distros
+    - name: Set Postgres and python parameters for rpm based distros
       run: |
         echo "/usr/pgsql-${{ matrix.POSTGRES_VERSION }}/bin" >> $GITHUB_PATH
+        echo "/root/.pyenv/bin:$PATH" >> $GITHUB_PATH
+        echo "PACKAGING_PYTHON_VERSION=3.8.16" >> $GITHUB_ENV
     - name: Configure
       run: |
@@ -115,14 +118,17 @@ jobs:
     container:
       image: citus/packaging:${{ matrix.packaging_docker_image }}
+      options: --user root
     steps:
    - name: Checkout repository
       uses: actions/checkout@v3
-    - name: Set pg_config path to related Postgres version
+    - name: Set pg_config path and python parameters for deb based distros
       run: |
         echo "PG_CONFIG=/usr/lib/postgresql/${{ matrix.POSTGRES_VERSION }}/bin/pg_config" >> $GITHUB_ENV
+        echo "/root/.pyenv/bin:$PATH" >> $GITHUB_PATH
+        echo "PACKAGING_PYTHON_VERSION=3.8.16" >> $GITHUB_ENV
     - name: Configure
       run: |
@@ -154,5 +160,4 @@ jobs:
         apt install python3-dev python3-pip -y
         sudo apt-get purge -y python3-yaml
         python3 -m pip install --upgrade pip setuptools==57.5.0
         ./.github/packaging/validate_build_output.sh "deb"


@@ -1,3 +1,107 @@
+### citus v11.2.0 (January 30, 2023) ###
+
+* Adds support for outer joins with reference tables / complex subquery-CTEs
+  in the outer side of the join (e.g., \<reference table\> LEFT JOIN
+  \<distributed table\>)
+
+* Adds support for creating `PRIMARY KEY`s and `UNIQUE`/`EXCLUSION`/`CHECK`/
+  `FOREIGN KEY` constraints via `ALTER TABLE` command without providing a
+  constraint name
+
+* Adds support for using identity columns on Citus tables
+
+* Adds support for `MERGE` command on local tables
+
+* Adds `citus_job_list()`, `citus_job_status()` and `citus_rebalance_status()`
+  UDFs that allow monitoring rebalancer progress
+
+* Adds `citus_task_wait()` UDF to wait on desired task status
+
+* Adds `source_lsn`, `target_lsn` and `status` fields into
+  `get_rebalance_progress()`
+
+* Introduces `citus_copy_shard_placement()` UDF with node id
+
+* Introduces `citus_move_shard_placement()` UDF with node id
+
+* Propagates `BEGIN` properties to worker nodes
+
+* Propagates `DROP OWNED BY` to worker nodes
+
+* Deprecates `citus.replicate_reference_tables_on_activate` and makes it
+  always `off`
+
+* Drops GUC `citus.defer_drop_after_shard_move`
+
+* Drops GUC `citus.defer_drop_after_shard_split`
+
+* Drops `SHARD_STATE_TO_DELETE` state and uses the cleanup records instead
+
+* Allows `citus_update_node()` to work with nodes from different clusters
+
+* Adds signal handlers for queue monitor to gracefully shut down, cancel and
+  to see config changes
+
+* Defers cleanup after a failure in shard move or split
+
+* Extends cleanup process for replication artifacts
+
+* Improves a query that terminates conflicting backends from
+  `citus_update_node()`
+
+* Includes Citus global pid in all internal `application_name`s
+
+* Avoids leaking `search_path` to workers when executing DDL commands
+
+* Fixes `alter_table_set_access_method()` error for views
+
+* Fixes `citus_drain_node()` to allow draining the specified worker only
+
+* Fixes a bug in global pid assignment for connections opened by rebalancer
+  internally
+
+* Fixes a bug that causes background rebalancer to fail when a reference table
+  doesn't have a primary key
+
+* Fixes a bug that might cause failures when querying views based on tables
+  that have renamed columns
+
+* Fixes a bug that might cause incorrectly planning the sublinks in the query
+  tree
+
+* Fixes a floating point exception during
+  `create_distributed_table_concurrently()`
+
+* Fixes a rebalancer failure due to integer overflow in subscription and role
+  creation
+
+* Fixes a regression in allowed foreign keys on distributed tables
+
+* Fixes a use-after-free bug in connection management
+
+* Fixes an unexpected foreign table error by disallowing dropping the
+  table_name option
+
+* Fixes an uninitialized memory access in `create_distributed_function()`
+
+* Fixes a crash that happens when trying to replicate a reference table that
+  is actually dropped
+
+* Makes sure to clean up the shard on the target node in case of a
+  failed/aborted shard move
+
+* Makes sure to create replication artifacts with unique names
+
+* Makes sure to disallow triggers that depend on extensions
+
+* Makes sure to quote all identifiers used for logical replication to prevent
+  potential issues
+
+* Makes sure to skip foreign key validations at the end of shard moves
+
+* Prevents crashes on `UPDATE` with certain `RETURNING` clauses
+
+* Propagates column aliases in the shard-level commands
+
 ### citus v11.1.5 (December 12, 2022) ###
 
 * Fixes two potential dangling pointer issues

configure (vendored)

@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 11.2devel.
+# Generated by GNU Autoconf 2.69 for Citus 11.3devel.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='Citus'
 PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='11.2devel'
-PACKAGE_STRING='Citus 11.2devel'
+PACKAGE_VERSION='11.3devel'
+PACKAGE_STRING='Citus 11.3devel'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''
@@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures Citus 11.2devel to adapt to many kinds of systems.
+\`configure' configures Citus 11.3devel to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1324,7 +1324,7 @@ fi
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Citus 11.2devel:";;
+     short | recursive ) echo "Configuration of Citus 11.3devel:";;
   esac
   cat <<\_ACEOF
@@ -1429,7 +1429,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-Citus configure 11.2devel
+Citus configure 11.3devel
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by Citus $as_me 11.2devel, which was
+It was created by Citus $as_me 11.3devel, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Citus $as_me 11.2devel, which was
+This file was extended by Citus $as_me 11.3devel, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -5455,7 +5455,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-Citus config.status 11.2devel
+Citus config.status 11.3devel
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"


@@ -5,7 +5,7 @@
 # everyone needing autoconf installed, the resulting files are checked
 # into the SCM.
 
-AC_INIT([Citus], [11.2devel])
+AC_INIT([Citus], [11.3devel])
 AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
 
 # we'll need sed and awk for some of the version commands


@@ -52,8 +52,7 @@ Benefits of Citus Columnar over cstore_fdw:
   ... FOR UPDATE``)
 * No support for serializable isolation level
 * Support for PostgreSQL server versions 12+ only
-* No support for foreign keys, unique constraints, or exclusion
-  constraints
+* No support for foreign keys
 * No support for logical decoding
 * No support for intra-node parallel scans
 * No support for ``AFTER ... FOR EACH ROW`` triggers


@@ -60,6 +60,10 @@
 #include "utils/relfilenodemap.h"
 
 #define COLUMNAR_RELOPTION_NAMESPACE "columnar"
 
+#define SLOW_METADATA_ACCESS_WARNING \
+	"Metadata index %s is not available, this might mean slower read/writes " \
+	"on columnar tables. This is expected during Postgres upgrades and not " \
+	"expected otherwise."
 
 typedef struct
 {
@@ -701,15 +705,23 @@ ReadStripeSkipList(RelFileNode relfilenode, uint64 stripe, TupleDesc tupleDescri
 	Oid columnarChunkOid = ColumnarChunkRelationId();
 	Relation columnarChunk = table_open(columnarChunkOid, AccessShareLock);
-	Relation index = index_open(ColumnarChunkIndexRelationId(), AccessShareLock);
 
 	ScanKeyInit(&scanKey[0], Anum_columnar_chunk_storageid,
 				BTEqualStrategyNumber, F_OIDEQ, UInt64GetDatum(storageId));
 	ScanKeyInit(&scanKey[1], Anum_columnar_chunk_stripe,
 				BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(stripe));
 
-	SysScanDesc scanDescriptor = systable_beginscan_ordered(columnarChunk, index,
-															snapshot, 2, scanKey);
+	Oid indexId = ColumnarChunkIndexRelationId();
+	bool indexOk = OidIsValid(indexId);
+	SysScanDesc scanDescriptor = systable_beginscan(columnarChunk, indexId,
+													indexOk, snapshot, 2, scanKey);
+
+	static bool loggedSlowMetadataAccessWarning = false;
+	if (!indexOk && !loggedSlowMetadataAccessWarning)
+	{
+		ereport(WARNING, (errmsg(SLOW_METADATA_ACCESS_WARNING, "chunk_pkey")));
+		loggedSlowMetadataAccessWarning = true;
+	}
 
 	StripeSkipList *chunkList = palloc0(sizeof(StripeSkipList));
 	chunkList->chunkCount = chunkCount;
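Editorial note: every hunk in this file applies the same pattern — resolve the metadata index OID, pass indexOk into systable_beginscan() so the scan silently degrades to a sequential scan when the index is missing (expected mid pg_upgrade), and warn once per backend through a function-local static flag. A condensed sketch of that pattern using the standard PostgreSQL genam.h API; BeginMetadataScan is a hypothetical helper, not part of this commit:

#include "postgres.h"
#include "access/genam.h"
#include "utils/rel.h"

/* hypothetical helper showing the index-or-seqscan pattern used above */
static SysScanDesc
BeginMetadataScan(Relation metadataRel, Oid indexId, Snapshot snapshot,
				  int nkeys, ScanKey keys, const char *indexName)
{
	/* systable_beginscan() falls back to a seqscan when indexOk is false */
	bool indexOk = OidIsValid(indexId);
	SysScanDesc scan = systable_beginscan(metadataRel, indexId, indexOk,
										  snapshot, nkeys, keys);

	/* warn only once per backend; the static flag persists across calls */
	static bool loggedSlowMetadataAccessWarning = false;
	if (!indexOk && !loggedSlowMetadataAccessWarning)
	{
		ereport(WARNING, (errmsg("metadata index %s is not available",
								 indexName)));
		loggedSlowMetadataAccessWarning = true;
	}

	return scan;
}

Because the sequential-scan fallback returns tuples in no particular order, the callers below that previously relied on index ordering now do their own min/max bookkeeping while iterating.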
@@ -721,8 +733,7 @@ ReadStripeSkipList(RelFileNode relfilenode, uint64 stripe, TupleDesc tupleDescri
 			palloc0(chunkCount * sizeof(ColumnChunkSkipNode));
 	}
 
-	while (HeapTupleIsValid(heapTuple = systable_getnext_ordered(scanDescriptor,
-																 ForwardScanDirection)))
+	while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
 	{
 		Datum datumArray[Natts_columnar_chunk];
 		bool isNullArray[Natts_columnar_chunk];
@@ -787,8 +798,7 @@ ReadStripeSkipList(RelFileNode relfilenode, uint64 stripe, TupleDesc tupleDescri
 		}
 	}
 
-	systable_endscan_ordered(scanDescriptor);
-	index_close(index, AccessShareLock);
+	systable_endscan(scanDescriptor);
 	table_close(columnarChunk, AccessShareLock);
 
 	chunkList->chunkGroupRowCounts =
@@ -799,9 +809,9 @@ ReadStripeSkipList(RelFileNode relfilenode, uint64 stripe, TupleDesc tupleDescri
 /*
- * FindStripeByRowNumber returns StripeMetadata for the stripe whose
- * firstRowNumber is greater than given rowNumber. If no such stripe
- * exists, then returns NULL.
+ * FindStripeByRowNumber returns StripeMetadata for the stripe that has the
+ * smallest firstRowNumber among the stripes whose firstRowNumber is greater
+ * than the given rowNumber. If no such stripe exists, then returns NULL.
  */
 StripeMetadata *
 FindNextStripeByRowNumber(Relation relation, uint64 rowNumber, Snapshot snapshot)
@@ -891,8 +901,7 @@ StripeGetHighestRowNumber(StripeMetadata *stripeMetadata)
 /*
  * StripeMetadataLookupRowNumber returns StripeMetadata for the stripe whose
  * firstRowNumber is less than or equal to (FIND_LESS_OR_EQUAL), or is
- * greater than (FIND_GREATER) given rowNumber by doing backward index
- * scan on stripe_first_row_number_idx.
+ * greater than (FIND_GREATER) given rowNumber.
  * If no such stripe exists, then returns NULL.
  */
 static StripeMetadata *
@@ -923,14 +932,23 @@ StripeMetadataLookupRowNumber(Relation relation, uint64 rowNumber, Snapshot snap
 	ScanKeyInit(&scanKey[1], Anum_columnar_stripe_first_row_number,
 				strategyNumber, procedure, UInt64GetDatum(rowNumber));
 
 	Relation columnarStripes = table_open(ColumnarStripeRelationId(), AccessShareLock);
-	Relation index = index_open(ColumnarStripeFirstRowNumberIndexRelationId(),
-								AccessShareLock);
-	SysScanDesc scanDescriptor = systable_beginscan_ordered(columnarStripes, index,
-															snapshot, 2,
-															scanKey);
+
+	Oid indexId = ColumnarStripeFirstRowNumberIndexRelationId();
+	bool indexOk = OidIsValid(indexId);
+	SysScanDesc scanDescriptor = systable_beginscan(columnarStripes, indexId, indexOk,
+													snapshot, 2, scanKey);
+
+	static bool loggedSlowMetadataAccessWarning = false;
+	if (!indexOk && !loggedSlowMetadataAccessWarning)
+	{
+		ereport(WARNING, (errmsg(SLOW_METADATA_ACCESS_WARNING,
+								 "stripe_first_row_number_idx")));
+		loggedSlowMetadataAccessWarning = true;
+	}
+
+	if (indexOk)
+	{
 		ScanDirection scanDirection = NoMovementScanDirection;
 		if (lookupMode == FIND_LESS_OR_EQUAL)
 		{
@@ -945,9 +963,40 @@ StripeMetadataLookupRowNumber(Relation relation, uint64 rowNumber, Snapshot snap
 		{
 			foundStripeMetadata = BuildStripeMetadata(columnarStripes, heapTuple);
 		}
+	}
+	else
+	{
+		HeapTuple heapTuple = NULL;
+		while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
+		{
+			StripeMetadata *stripe = BuildStripeMetadata(columnarStripes, heapTuple);
+			if (!foundStripeMetadata)
+			{
+				/* first match */
+				foundStripeMetadata = stripe;
+			}
+			else if (lookupMode == FIND_LESS_OR_EQUAL &&
+					 stripe->firstRowNumber > foundStripeMetadata->firstRowNumber)
+			{
+				/*
+				 * Among the stripes with firstRowNumber less-than-or-equal-to given,
+				 * we're looking for the one with the greatest firstRowNumber.
+				 */
+				foundStripeMetadata = stripe;
+			}
+			else if (lookupMode == FIND_GREATER &&
+					 stripe->firstRowNumber < foundStripeMetadata->firstRowNumber)
+			{
+				/*
+				 * Among the stripes with firstRowNumber greater-than given,
+				 * we're looking for the one with the smallest firstRowNumber.
+				 */
+				foundStripeMetadata = stripe;
+			}
+		}
+	}
 
-	systable_endscan_ordered(scanDescriptor);
-	index_close(index, AccessShareLock);
+	systable_endscan(scanDescriptor);
 	table_close(columnarStripes, AccessShareLock);
 
 	return foundStripeMetadata;
@@ -1021,8 +1070,8 @@ CheckStripeMetadataConsistency(StripeMetadata *stripeMetadata)
 /*
  * FindStripeWithHighestRowNumber returns StripeMetadata for the stripe that
- * has the row with highest rowNumber by doing backward index scan on
- * stripe_first_row_number_idx. If given relation is empty, then returns NULL.
+ * has the row with highest rowNumber. If given relation is empty, then returns
+ * NULL.
  */
 StripeMetadata *
 FindStripeWithHighestRowNumber(Relation relation, Snapshot snapshot)
@@ -1035,19 +1084,46 @@ FindStripeWithHighestRowNumber(Relation relation, Snapshot snapshot)
 				BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(storageId));
 
 	Relation columnarStripes = table_open(ColumnarStripeRelationId(), AccessShareLock);
-	Relation index = index_open(ColumnarStripeFirstRowNumberIndexRelationId(),
-								AccessShareLock);
-	SysScanDesc scanDescriptor = systable_beginscan_ordered(columnarStripes, index,
-															snapshot, 1, scanKey);
+
+	Oid indexId = ColumnarStripeFirstRowNumberIndexRelationId();
+	bool indexOk = OidIsValid(indexId);
+	SysScanDesc scanDescriptor = systable_beginscan(columnarStripes, indexId, indexOk,
+													snapshot, 1, scanKey);
 
-	HeapTuple heapTuple = systable_getnext_ordered(scanDescriptor, BackwardScanDirection);
-	if (HeapTupleIsValid(heapTuple))
-	{
-		stripeWithHighestRowNumber = BuildStripeMetadata(columnarStripes, heapTuple);
-	}
+	static bool loggedSlowMetadataAccessWarning = false;
+	if (!indexOk && !loggedSlowMetadataAccessWarning)
+	{
+		ereport(WARNING, (errmsg(SLOW_METADATA_ACCESS_WARNING,
+								 "stripe_first_row_number_idx")));
+		loggedSlowMetadataAccessWarning = true;
+	}
+
+	if (indexOk)
+	{
+		/* do one-time fetch using the index */
+		HeapTuple heapTuple = systable_getnext_ordered(scanDescriptor,
+													   BackwardScanDirection);
+		if (HeapTupleIsValid(heapTuple))
+		{
+			stripeWithHighestRowNumber = BuildStripeMetadata(columnarStripes, heapTuple);
+		}
+	}
+	else
+	{
+		HeapTuple heapTuple = NULL;
+		while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
+		{
+			StripeMetadata *stripe = BuildStripeMetadata(columnarStripes, heapTuple);
+			if (!stripeWithHighestRowNumber ||
+				stripe->firstRowNumber > stripeWithHighestRowNumber->firstRowNumber)
+			{
+				/* first or a greater match */
+				stripeWithHighestRowNumber = stripe;
+			}
+		}
+	}
 
-	systable_endscan_ordered(scanDescriptor);
-	index_close(index, AccessShareLock);
+	systable_endscan(scanDescriptor);
 	table_close(columnarStripes, AccessShareLock);
 
 	return stripeWithHighestRowNumber;
@@ -1064,7 +1140,6 @@ ReadChunkGroupRowCounts(uint64 storageId, uint64 stripe, uint32 chunkGroupCount,
 {
 	Oid columnarChunkGroupOid = ColumnarChunkGroupRelationId();
 	Relation columnarChunkGroup = table_open(columnarChunkGroupOid, AccessShareLock);
-	Relation index = index_open(ColumnarChunkGroupIndexRelationId(), AccessShareLock);
 
 	ScanKeyData scanKey[2];
 	ScanKeyInit(&scanKey[0], Anum_columnar_chunkgroup_storageid,
@@ -1072,15 +1147,22 @@ ReadChunkGroupRowCounts(uint64 storageId, uint64 stripe, uint32 chunkGroupCount,
 	ScanKeyInit(&scanKey[1], Anum_columnar_chunkgroup_stripe,
 				BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(stripe));
 
+	Oid indexId = ColumnarChunkGroupIndexRelationId();
+	bool indexOk = OidIsValid(indexId);
 	SysScanDesc scanDescriptor =
-		systable_beginscan_ordered(columnarChunkGroup, index, snapshot, 2, scanKey);
+		systable_beginscan(columnarChunkGroup, indexId, indexOk, snapshot, 2, scanKey);
+
+	static bool loggedSlowMetadataAccessWarning = false;
+	if (!indexOk && !loggedSlowMetadataAccessWarning)
+	{
+		ereport(WARNING, (errmsg(SLOW_METADATA_ACCESS_WARNING, "chunk_group_pkey")));
+		loggedSlowMetadataAccessWarning = true;
+	}
 
-	uint32 chunkGroupIndex = 0;
 	HeapTuple heapTuple = NULL;
 	uint32 *chunkGroupRowCounts = palloc0(chunkGroupCount * sizeof(uint32));
 
-	while (HeapTupleIsValid(heapTuple = systable_getnext_ordered(scanDescriptor,
-																 ForwardScanDirection)))
+	while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
 	{
 		Datum datumArray[Natts_columnar_chunkgroup];
 		bool isNullArray[Natts_columnar_chunkgroup];
@@ -1091,24 +1173,16 @@ ReadChunkGroupRowCounts(uint64 storageId, uint64 stripe, uint32 chunkGroupCount,
 		uint32 tupleChunkGroupIndex =
 			DatumGetUInt32(datumArray[Anum_columnar_chunkgroup_chunk - 1]);
-		if (chunkGroupIndex >= chunkGroupCount ||
-			tupleChunkGroupIndex != chunkGroupIndex)
+		if (tupleChunkGroupIndex >= chunkGroupCount)
 		{
 			elog(ERROR, "unexpected chunk group");
 		}
 
-		chunkGroupRowCounts[chunkGroupIndex] =
+		chunkGroupRowCounts[tupleChunkGroupIndex] =
 			(uint32) DatumGetUInt64(datumArray[Anum_columnar_chunkgroup_row_count - 1]);
-		chunkGroupIndex++;
 	}
 
-	if (chunkGroupIndex != chunkGroupCount)
-	{
-		elog(ERROR, "unexpected chunk group count");
-	}
-
-	systable_endscan_ordered(scanDescriptor);
-	index_close(index, AccessShareLock);
+	systable_endscan(scanDescriptor);
 	table_close(columnarChunkGroup, AccessShareLock);
 
 	return chunkGroupRowCounts;
@@ -1305,14 +1379,20 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, bool *update,
 	Oid columnarStripesOid = ColumnarStripeRelationId();
 
 	Relation columnarStripes = table_open(columnarStripesOid, AccessShareLock);
-	Relation columnarStripePkeyIndex = index_open(ColumnarStripePKeyIndexRelationId(),
-												  AccessShareLock);
 
-	SysScanDesc scanDescriptor = systable_beginscan_ordered(columnarStripes,
-															columnarStripePkeyIndex,
-															&dirtySnapshot, 2, scanKey);
+	Oid indexId = ColumnarStripePKeyIndexRelationId();
+	bool indexOk = OidIsValid(indexId);
+	SysScanDesc scanDescriptor = systable_beginscan(columnarStripes, indexId, indexOk,
+													&dirtySnapshot, 2, scanKey);
 
-	HeapTuple oldTuple = systable_getnext_ordered(scanDescriptor, ForwardScanDirection);
+	static bool loggedSlowMetadataAccessWarning = false;
+	if (!indexOk && !loggedSlowMetadataAccessWarning)
+	{
+		ereport(WARNING, (errmsg(SLOW_METADATA_ACCESS_WARNING, "stripe_pkey")));
+		loggedSlowMetadataAccessWarning = true;
+	}
+
+	HeapTuple oldTuple = systable_getnext(scanDescriptor);
 	if (!HeapTupleIsValid(oldTuple))
 	{
 		ereport(ERROR, (errmsg("attempted to modify an unexpected stripe, "
@@ -1347,8 +1427,7 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, bool *update,
 	CommandCounterIncrement();
 
-	systable_endscan_ordered(scanDescriptor);
-	index_close(columnarStripePkeyIndex, AccessShareLock);
+	systable_endscan(scanDescriptor);
 	table_close(columnarStripes, AccessShareLock);
 
 	/* return StripeMetadata object built from modified tuple */
@@ -1359,6 +1438,10 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, bool *update,
 /*
  * ReadDataFileStripeList reads the stripe list for a given storageId
  * in the given snapshot.
+ *
+ * Doesn't sort the stripes by their ids before returning if
+ * stripe_first_row_number_idx is not available, which normally can only
+ * happen during pg upgrades.
  */
 static List *
 ReadDataFileStripeList(uint64 storageId, Snapshot snapshot)
@@ -1373,22 +1456,27 @@ ReadDataFileStripeList(uint64 storageId, Snapshot snapshot)
 	Oid columnarStripesOid = ColumnarStripeRelationId();
 
 	Relation columnarStripes = table_open(columnarStripesOid, AccessShareLock);
-	Relation index = index_open(ColumnarStripeFirstRowNumberIndexRelationId(),
-								AccessShareLock);
 
-	SysScanDesc scanDescriptor = systable_beginscan_ordered(columnarStripes, index,
-															snapshot, 1,
-															scanKey);
+	Oid indexId = ColumnarStripeFirstRowNumberIndexRelationId();
+	bool indexOk = OidIsValid(indexId);
+	SysScanDesc scanDescriptor = systable_beginscan(columnarStripes, indexId,
+													indexOk, snapshot, 1, scanKey);
 
-	while (HeapTupleIsValid(heapTuple = systable_getnext_ordered(scanDescriptor,
-																 ForwardScanDirection)))
+	static bool loggedSlowMetadataAccessWarning = false;
+	if (!indexOk && !loggedSlowMetadataAccessWarning)
+	{
+		ereport(WARNING, (errmsg(SLOW_METADATA_ACCESS_WARNING,
+								 "stripe_first_row_number_idx")));
+		loggedSlowMetadataAccessWarning = true;
+	}
+
+	while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
 	{
 		StripeMetadata *stripeMetadata = BuildStripeMetadata(columnarStripes, heapTuple);
 		stripeMetadataList = lappend(stripeMetadataList, stripeMetadata);
 	}
 
-	systable_endscan_ordered(scanDescriptor);
-	index_close(index, AccessShareLock);
+	systable_endscan(scanDescriptor);
 	table_close(columnarStripes, AccessShareLock);
 
 	return stripeMetadataList;
@@ -1499,25 +1587,30 @@ DeleteStorageFromColumnarMetadataTable(Oid metadataTableId,
 		return;
 	}
 
-	Relation index = index_open(storageIdIndexId, AccessShareLock);
-
-	SysScanDesc scanDescriptor = systable_beginscan_ordered(metadataTable, index, NULL,
-															1, scanKey);
+	bool indexOk = OidIsValid(storageIdIndexId);
+	SysScanDesc scanDescriptor = systable_beginscan(metadataTable, storageIdIndexId,
+													indexOk, NULL, 1, scanKey);
+
+	static bool loggedSlowMetadataAccessWarning = false;
+	if (!indexOk && !loggedSlowMetadataAccessWarning)
+	{
+		ereport(WARNING, (errmsg(SLOW_METADATA_ACCESS_WARNING,
+								 "on a columnar metadata table")));
+		loggedSlowMetadataAccessWarning = true;
+	}
 
 	ModifyState *modifyState = StartModifyRelation(metadataTable);
 
 	HeapTuple heapTuple;
-	while (HeapTupleIsValid(heapTuple = systable_getnext_ordered(scanDescriptor,
-																 ForwardScanDirection)))
+	while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
 	{
 		DeleteTupleAndEnforceConstraints(modifyState, heapTuple);
 	}
 
-	systable_endscan_ordered(scanDescriptor);
+	systable_endscan(scanDescriptor);
 
 	FinishModifyRelation(modifyState);
 
-	index_close(index, AccessShareLock);
 	table_close(metadataTable, AccessShareLock);
 }


@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '11.2-1'
+default_version = '11.3-1'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog


@@ -623,18 +623,13 @@ ExecuteForeignKeyCreateCommand(const char *commandString, bool skip_validation)
 	 */
 	Assert(IsA(parseTree, AlterTableStmt));
 
-	bool oldSkipConstraintsValidationValue = SkipConstraintValidation;
-
 	if (skip_validation && IsA(parseTree, AlterTableStmt))
 	{
-		EnableSkippingConstraintValidation();
+		SkipForeignKeyValidationIfConstraintIsFkey((AlterTableStmt *) parseTree, true);
 
 		ereport(DEBUG4, (errmsg("skipping validation for foreign key create "
 								"command \"%s\"", commandString)));
 	}
 
 	ProcessUtilityParseTree(parseTree, commandString, PROCESS_UTILITY_QUERY,
 							NULL, None_Receiver, NULL);
-
-	SkipConstraintValidation = oldSkipConstraintsValidationValue;
 }


@@ -46,6 +46,7 @@
 #include "utils/lsyscache.h"
 #include "utils/ruleutils.h"
 #include "utils/syscache.h"
+#include "foreign/foreign.h"
 
@@ -60,6 +61,8 @@ static void citus_add_local_table_to_metadata_internal(Oid relationId,
 static void ErrorIfAddingPartitionTableToMetadata(Oid relationId);
 static void ErrorIfUnsupportedCreateCitusLocalTable(Relation relation);
 static void ErrorIfUnsupportedCitusLocalTableKind(Oid relationId);
+static void EnsureIfPostgresFdwHasTableName(Oid relationId);
+static void ErrorIfOptionListHasNoTableName(List *optionList);
 static void NoticeIfAutoConvertingLocalTables(bool autoConverted, Oid relationId);
 static CascadeOperationType GetCascadeTypeForCitusLocalTables(bool autoConverted);
 static List * GetShellTableDDLEventsForCitusLocalTable(Oid relationId);
@@ -494,6 +497,16 @@ ErrorIfUnsupportedCreateCitusLocalTable(Relation relation)
 	EnsureTableNotDistributed(relationId);
 	ErrorIfRelationHasUnsupportedTrigger(relationId);
 
+	/*
+	 * Error out with a hint if the foreign table is using postgres_fdw and
+	 * the option table_name is not provided.
+	 * Citus relays all the Citus local foreign table logic to the placement of the
+	 * Citus local table. If table_name is NOT provided, Citus would try to talk to
+	 * the foreign postgres table over the shard's table name, which would not exist
+	 * on the remote server.
+	 */
+	EnsureIfPostgresFdwHasTableName(relationId);
+
 	/*
 	 * When creating other citus table types, we don't need to check that case as
 	 * EnsureTableNotDistributed already errors out if the given relation implies
@@ -509,6 +522,93 @@ ErrorIfUnsupportedCreateCitusLocalTable(Relation relation)
 }
 
+/*
+ * ServerUsesPostgresFdw gets a foreign server Oid and returns true if the FDW that
+ * the server depends on is postgres_fdw. Returns false otherwise.
+ */
+bool
+ServerUsesPostgresFdw(Oid serverId)
+{
+	ForeignServer *server = GetForeignServer(serverId);
+	ForeignDataWrapper *fdw = GetForeignDataWrapper(server->fdwid);
+
+	if (strcmp(fdw->fdwname, "postgres_fdw") == 0)
+	{
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * EnsureIfPostgresFdwHasTableName errors out with a hint if the foreign table
+ * is using postgres_fdw and the option table_name is not provided.
+ */
+static void
+EnsureIfPostgresFdwHasTableName(Oid relationId)
+{
+	char relationKind = get_rel_relkind(relationId);
+	if (relationKind == RELKIND_FOREIGN_TABLE)
+	{
+		ForeignTable *foreignTable = GetForeignTable(relationId);
+		if (ServerUsesPostgresFdw(foreignTable->serverid))
+		{
+			ErrorIfOptionListHasNoTableName(foreignTable->options);
+		}
+	}
+}
+
+/*
+ * ErrorIfOptionListHasNoTableName gets an option list (DefElem) and errors out
+ * if the list does not contain a table_name element.
+ */
+static void
+ErrorIfOptionListHasNoTableName(List *optionList)
+{
+	char *table_nameString = "table_name";
+	DefElem *option = NULL;
+	foreach_ptr(option, optionList)
+	{
+		char *optionName = option->defname;
+		if (strcmp(optionName, table_nameString) == 0)
+		{
+			return;
+		}
+	}
+
+	ereport(ERROR, (errmsg(
+						"table_name option must be provided when using postgres_fdw with Citus"),
+					errhint("Provide the option \"table_name\" with value target table's"
+							" name")));
+}
+
+/*
+ * ForeignTableDropsTableNameOption returns true if given option list contains
+ * (DROP table_name).
+ */
+bool
+ForeignTableDropsTableNameOption(List *optionList)
+{
+	char *table_nameString = "table_name";
+	DefElem *option = NULL;
+	foreach_ptr(option, optionList)
+	{
+		char *optionName = option->defname;
+		DefElemAction optionAction = option->defaction;
+
+		if (strcmp(optionName, table_nameString) == 0 &&
+			optionAction == DEFELEM_DROP)
+		{
+			return true;
+		}
+	}
+
+	return false;
+}
+
 /*
  * ErrorIfUnsupportedCitusLocalTableKind errors out if the relation kind of
  * relation with relationId is not supported for citus local table creation.


@@ -2337,27 +2337,6 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
 }
 
-/*
- * RelationUsesIdentityColumns returns whether a given relation uses
- * GENERATED ... AS IDENTITY
- */
-bool
-RelationUsesIdentityColumns(TupleDesc relationDesc)
-{
-	for (int attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++)
-	{
-		Form_pg_attribute attributeForm = TupleDescAttr(relationDesc, attributeIndex);
-
-		if (attributeForm->attidentity != '\0')
-		{
-			return true;
-		}
-	}
-
-	return false;
-}
-
 #if (PG_VERSION_NUM >= PG_VERSION_15)
 
 /*


@@ -1311,19 +1311,6 @@ IsTableTypeIncluded(Oid relationId, int flags)
 }
 
-/*
- * EnableSkippingConstraintValidation is simply a C interface for setting the following:
- * SET LOCAL citus.skip_constraint_validation TO on;
- */
-void
-EnableSkippingConstraintValidation()
-{
-	set_config_option("citus.skip_constraint_validation", "true",
-					  PGC_SUSET, PGC_S_SESSION,
-					  GUC_ACTION_LOCAL, true, 0, false);
-}
-
 /*
  * RelationInvolvedInAnyNonInheritedForeignKeys returns true if relation involved
  * in a foreign key that is not inherited from its parent relation.


@@ -752,6 +752,8 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
 						   distAddress->objectId, distAddress->objectSubId)));
 	}
 
+	memset(values, 0, sizeof(values));
+	memset(isnull, 0, sizeof(isnull));
 	memset(replace, 0, sizeof(replace));
 
 	replace[Anum_pg_dist_object_distribution_argument_index - 1] = true;
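Editorial note: the two added memset calls matter because heap_modify_tuple() reads values[i] and isnull[i] for every attribute whose replace[i] flag is set; with only replace zeroed, any slot flagged later would feed uninitialized stack memory into the new tuple. A minimal sketch of the call shape (heapTuple and tupleDescriptor are assumed from the surrounding function, and the replaced attribute is only illustrative):

/* all three arrays must be fully defined before heap_modify_tuple() */
Datum values[Natts_pg_dist_object];
bool isnull[Natts_pg_dist_object];
bool replace[Natts_pg_dist_object];

memset(values, 0, sizeof(values));
memset(isnull, 0, sizeof(isnull));
memset(replace, 0, sizeof(replace));

/* replace exactly one attribute; leave the rest untouched */
replace[Anum_pg_dist_object_distribution_argument_index - 1] = true;
isnull[Anum_pg_dist_object_distribution_argument_index - 1] = true;

HeapTuple newTuple = heap_modify_tuple(heapTuple, tupleDescriptor,
									   values, isnull, replace);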


@@ -41,6 +41,7 @@
 #include "distributed/resource_lock.h"
 #include "distributed/version_compat.h"
 #include "distributed/worker_shard_visibility.h"
+#include "foreign/foreign.h"
 #include "lib/stringinfo.h"
 #include "nodes/parsenodes.h"
 #include "parser/parse_expr.h"
@@ -119,6 +120,8 @@ static Oid get_attrdef_oid(Oid relationId, AttrNumber attnum);
 static char * GetAddColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId,
 												char *colname, TypeName *typeName);
+static void ErrorIfAlterTableDropTableNameFromPostgresFdw(List *optionList,
+														  Oid relationId);
 
 /*
@@ -956,11 +959,15 @@ PreprocessAlterTableAddConstraint(AlterTableStmt *alterTableStatement, Oid
 														relationId,
 														Constraint *constraint)
 {
-	/* We should only preprocess an ADD CONSTRAINT command if we are changing the it.
+	/*
+	 * We should only preprocess an ADD CONSTRAINT command if we have an empty conname.
 	 * This only happens when we have to create a constraint name in citus since the client does
 	 * not specify a name.
+	 * indexname should also be NULL to make sure this is not an
+	 * ADD {PRIMARY KEY, UNIQUE} USING INDEX command,
+	 * which doesn't need a conname since the indexname will be used.
 	 */
-	Assert(constraint->conname == NULL);
+	Assert(constraint->conname == NULL && constraint->indexname == NULL);
 
 	Relation rel = RelationIdGetRelation(relationId);
@@ -1269,7 +1276,13 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
 													   constraint);
 			}
 		}
+
+		/*
+		 * When constraint->indexname is not NULL we are handling an
+		 * ADD {PRIMARY KEY, UNIQUE} USING INDEX command. In this case
+		 * we do not have to create a name and change the command;
+		 * the existing index name will be used by postgres.
+		 */
-		else if (constraint->conname == NULL)
+		else if (constraint->conname == NULL && constraint->indexname == NULL)
 		{
 			if (ConstrTypeCitusCanDefaultName(constraint->contype))
 			{
@@ -2255,7 +2268,8 @@ PreprocessAlterTableSchemaStmt(Node *node, const char *queryString,
  * ALTER TABLE ... ADD FOREIGN KEY command to skip the validation step.
  */
 void
-SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStatement)
+SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStatement,
+										   bool processLocalRelation)
 {
 	/* first check whether a distributed relation is affected */
 	if (alterTableStatement->relation == NULL)
@@ -2270,11 +2284,17 @@ SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStatement)
 		return;
 	}
 
-	if (!IsCitusTable(leftRelationId))
+	if (!IsCitusTable(leftRelationId) && !processLocalRelation)
 	{
 		return;
 	}
 
+	/*
+	 * We check if there is an ADD FOREIGN KEY CONSTRAINT command in the
+	 * subcommand list. If so, we set skip_validation to true to prevent
+	 * PostgreSQL from verifying validity of the foreign constraint.
+	 * Validity will be checked on the shards anyway.
+	 */
 	AlterTableCmd *command = NULL;
 	foreach_ptr(command, alterTableStatement->cmds)
 	{
@@ -2286,9 +2306,8 @@ SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStatement)
 		Constraint *constraint = (Constraint *) command->def;
 		if (constraint->contype == CONSTR_FOREIGN)
 		{
-			/* set the GUC skip_constraint_validation to on */
-			EnableSkippingConstraintValidation();
-			return;
+			/* foreign constraint validations will be done in shards. */
+			constraint->skip_validation = true;
 		}
 	}
 }
@@ -3062,6 +3081,42 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
 }
 
+/*
+ * ErrorIfAlterTableDropTableNameFromPostgresFdw errors out if the given ALTER
+ * FOREIGN TABLE option list drops 'table_name' from a postgres_fdw foreign
+ * table that is inside metadata.
+ */
+static void
+ErrorIfAlterTableDropTableNameFromPostgresFdw(List *optionList, Oid relationId)
+{
+	char relationKind PG_USED_FOR_ASSERTS_ONLY =
+		get_rel_relkind(relationId);
+	Assert(relationKind == RELKIND_FOREIGN_TABLE);
+
+	ForeignTable *foreignTable = GetForeignTable(relationId);
+	Oid serverId = foreignTable->serverid;
+	if (!ServerUsesPostgresFdw(serverId))
+	{
+		return;
+	}
+
+	if (IsCitusTableType(relationId, CITUS_LOCAL_TABLE) &&
+		ForeignTableDropsTableNameOption(optionList))
+	{
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg(
+					 "alter foreign table alter options (drop table_name) command "
+					 "is not allowed for Citus tables"),
+				 errdetail(
+					 "Table_name option can not be dropped from a foreign table "
+					 "which is inside metadata."),
+				 errhint(
+					 "Try to undistribute foreign table before dropping table_name option.")));
+	}
+}
+
 /*
  * ErrorIfUnsupportedAlterTableStmt checks if the corresponding alter table
  * statement is supported for distributed tables and errors out if it is not.
@@ -3320,8 +3375,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 			case AT_AddConstraint:
 			{
-				Constraint *constraint = (Constraint *) command->def;
-
 				/* we only allow constraints if they are only subcommand */
 				if (commandList->length > 1)
 				{
@@ -3331,26 +3384,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 									errhint("You can issue each subcommand separately")));
 				}
 
-				/*
-				 * We will use constraint name in each placement by extending it at
-				 * workers. Therefore we require it to be exist.
-				 */
-				if (constraint->conname == NULL)
-				{
-					/*
-					 * We support ALTER TABLE ... ADD PRIMARY ... commands by creating a constraint name
-					 * and changing the command into the following form.
-					 * ALTER TABLE ... ADD CONSTRAINT <constaint_name> PRIMARY KEY ...
-					 */
-					if (ConstrTypeCitusCanDefaultName(constraint->contype) == false)
-					{
-						ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-										errmsg(
-											"cannot create constraint without a name on a "
-											"distributed table")));
-					}
-				}
-
 				break;
 			}
@@ -3496,6 +3529,8 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 			{
 				if (IsForeignTable(relationId))
 				{
+					List *optionList = (List *) command->def;
+					ErrorIfAlterTableDropTableNameFromPostgresFdw(optionList, relationId);
 					break;
 				}
 			}


@@ -116,9 +116,6 @@ static void DecrementUtilityHookCountersIfNecessary(Node *parsetree);
 static bool IsDropSchemaOrDB(Node *parsetree);
 static bool ShouldCheckUndistributeCitusLocalTables(void);
 static bool ShouldAddNewTableToMetadata(Node *parsetree);
-static bool ServerUsesPostgresFDW(char *serverName);
-static void ErrorIfOptionListHasNoTableName(List *optionList);
 
 /*
  * ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of
@@ -222,6 +219,23 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 		PreprocessCreateExtensionStmtForCitusColumnar(parsetree);
 	}
 
+	/*
+	 * Make sure that on DROP DATABASE we terminate the background daemon
+	 * associated with it.
+	 */
+	if (IsA(parsetree, DropdbStmt))
+	{
+		const bool missingOK = true;
+		DropdbStmt *dropDbStatement = (DropdbStmt *) parsetree;
+		char *dbname = dropDbStatement->dbname;
+		Oid databaseOid = get_database_oid(dbname, missingOK);
+
+		if (OidIsValid(databaseOid))
+		{
+			StopMaintenanceDaemon(databaseOid);
+		}
+	}
+
 	if (!CitusHasBeenLoaded())
 	{
 		/*
@@ -389,7 +403,6 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 	Node *parsetree = pstmt->utilityStmt;
 	List *ddlJobs = NIL;
 	DistOpsValidationState distOpsValidationState = HasNoneValidObject;
-	bool oldSkipConstraintsValidationValue = SkipConstraintValidation;
 
 	if (IsA(parsetree, ExplainStmt) &&
 		IsA(((ExplainStmt *) parsetree)->query, Query))
@@ -608,7 +621,9 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 	 * Citus intervening. The only exception is partition column drop, in
 	 * which case we error out. Advanced Citus users use this to implement their
 	 * own DDL propagation. We also use it to avoid re-propagating DDL commands
-	 * when changing MX tables on workers.
+	 * when changing MX tables on workers. Below, we also make sure that DDL
+	 * commands don't run queries that might get intercepted by Citus and error
+	 * out during planning; specifically, we skip validation of foreign keys.
 	 */
 
 	if (IsA(parsetree, AlterTableStmt))
@@ -627,7 +642,33 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 			 * Note validation is done on the shard level when DDL propagation
 			 * is enabled. The following eagerly executes some tasks on workers.
 			 */
-			SkipForeignKeyValidationIfConstraintIsFkey(alterTableStmt);
+			SkipForeignKeyValidationIfConstraintIsFkey(alterTableStmt, false);
 		}
 	}
 
+	/*
+	 * If we've explicitly set the citus.skip_constraint_validation GUC, then
+	 * we skip validation of any added constraints.
+	 */
+	if (IsA(parsetree, AlterTableStmt) && SkipConstraintValidation)
+	{
+		AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree;
+		AlterTableCmd *command = NULL;
+		foreach_ptr(command, alterTableStmt->cmds)
+		{
+			AlterTableType alterTableType = command->subtype;
+
+			/*
+			 * XXX: In theory we could probably use this GUC to skip validation
+			 * of VALIDATE CONSTRAINT and ALTER CONSTRAINT too. But currently
+			 * this is not needed, so we make its behaviour only apply to ADD
+			 * CONSTRAINT.
+			 */
+			if (alterTableType == AT_AddConstraint)
+			{
+				Constraint *constraint = (Constraint *) command->def;
+				constraint->skip_validation = true;
+			}
+		}
+	}
@@ -654,22 +695,9 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 	}
 
 	/*
-	 * Make sure that on DROP DATABASE we terminate the background daemon
+	 * Make sure that on DROP EXTENSION we terminate the background daemon
 	 * associated with it.
 	 */
-	if (IsA(parsetree, DropdbStmt))
-	{
-		const bool missingOK = true;
-		DropdbStmt *dropDbStatement = (DropdbStmt *) parsetree;
-		char *dbname = dropDbStatement->dbname;
-		Oid databaseOid = get_database_oid(dbname, missingOK);
-
-		if (OidIsValid(databaseOid))
-		{
-			StopMaintenanceDaemon(databaseOid);
-		}
-	}
-
 	if (IsDropCitusExtensionStmt(parsetree))
 	{
 		StopMaintenanceDaemon(MyDatabaseId);
@@ -798,18 +826,6 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 		CreateStmt *createTableStmt = (CreateStmt *) (&createForeignTableStmt->base);
 
-		/*
-		 * Error out with a hint if the foreign table is using postgres_fdw and
-		 * the option table_name is not provided.
-		 * Citus relays all the Citus local foreign table logic to the placement of the
-		 * Citus local table. If table_name is NOT provided, Citus would try to talk to
-		 * the foreign postgres table over the shard's table name, which would not exist
-		 * on the remote server.
-		 */
-		if (ServerUsesPostgresFDW(createForeignTableStmt->servername))
-		{
-			ErrorIfOptionListHasNoTableName(createForeignTableStmt->options);
-		}
-
 		PostprocessCreateTableStmt(createTableStmt, queryString);
 	}
@@ -913,8 +929,6 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 		 */
 		CitusHasBeenLoaded(); /* lgtm[cpp/return-value-ignored] */
 	}
-
-	SkipConstraintValidation = oldSkipConstraintsValidationValue;
 }
@@ -1099,50 +1113,6 @@ ShouldAddNewTableToMetadata(Node *parsetree)
 }
 
-/*
- * ServerUsesPostgresFDW gets a foreign server name and returns true if the FDW that
- * the server depends on is postgres_fdw. Returns false otherwise.
- */
-static bool
-ServerUsesPostgresFDW(char *serverName)
-{
-	ForeignServer *server = GetForeignServerByName(serverName, false);
-	ForeignDataWrapper *fdw = GetForeignDataWrapper(server->fdwid);
-
-	if (strcmp(fdw->fdwname, "postgres_fdw") == 0)
-	{
-		return true;
-	}
-
-	return false;
-}
-
-/*
- * ErrorIfOptionListHasNoTableName gets an option list (DefElem) and errors out
- * if the list does not contain a table_name element.
- */
-static void
-ErrorIfOptionListHasNoTableName(List *optionList)
-{
-	char *table_nameString = "table_name";
-	DefElem *option = NULL;
-	foreach_ptr(option, optionList)
-	{
-		char *optionName = option->defname;
-		if (strcmp(optionName, table_nameString) == 0)
-		{
-			return;
-		}
-	}
-
-	ereport(ERROR, (errmsg(
-						"table_name option must be provided when using postgres_fdw with Citus"),
-					errhint("Provide the option \"table_name\" with value target table's"
-							" name")));
-}
-
 /*
  * NotifyUtilityHookConstraintDropped sets ConstraintDropped to true to tell us
  * last command dropped a table constraint.


@@ -1454,6 +1454,9 @@ AfterXactHostConnectionHandling(ConnectionHashEntry *entry, bool isCommit)
 		{
 			ShutdownConnection(connection);
 
+			/* remove from transaction list before freeing */
+			ResetRemoteTransaction(connection);
+
 			/* unlink from list */
 			dlist_delete(iter.cur);


@@ -168,7 +168,7 @@ AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
 	if (constraint->contype == CONSTR_PRIMARY)
 	{
 		appendStringInfoString(buf,
-							   " PRIMARY KEY (");
+							   " PRIMARY KEY ");
 	}
 	else
 	{
@@ -180,44 +180,15 @@ AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
 			appendStringInfoString(buf, " NULLS NOT DISTINCT");
 		}
 #endif
-		appendStringInfoString(buf, " (");
 	}
 
-	ListCell *lc;
-	bool firstkey = true;
-	foreach(lc, constraint->keys)
-	{
-		if (firstkey == false)
-		{
-			appendStringInfoString(buf, ", ");
-		}
-		appendStringInfo(buf, "%s", quote_identifier(strVal(lfirst(lc))));
-		firstkey = false;
-	}
-	appendStringInfoString(buf, ")");
+	AppendColumnNameList(buf, constraint->keys);
 
 	if (constraint->including != NULL)
 	{
-		appendStringInfoString(buf, " INCLUDE (");
-		firstkey = true;
-		foreach(lc, constraint->including)
-		{
-			if (firstkey == false)
-			{
-				appendStringInfoString(buf, ", ");
-			}
-			appendStringInfo(buf, "%s", quote_identifier(strVal(lfirst(
-																	lc))));
-			firstkey = false;
-		}
-		appendStringInfoString(buf, " )");
+		appendStringInfoString(buf, " INCLUDE ");
+		AppendColumnNameList(buf, constraint->including);
 	}
 }
 else if (constraint->contype == CONSTR_EXCLUSION)
@@ -404,6 +375,12 @@ AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
 		}
 	}
 
+	/* FOREIGN KEY and CHECK constraints might have the NOT VALID option */
+	if (constraint->skip_validation)
+	{
+		appendStringInfoString(buf, " NOT VALID ");
+	}
+
 	if (constraint->deferrable)
 	{
 		appendStringInfoString(buf, " DEFERRABLE");


@@ -53,7 +53,6 @@
 #include "common/keywords.h"
 #include "distributed/citus_nodefuncs.h"
 #include "distributed/citus_ruleutils.h"
-#include "distributed/multi_router_planner.h"
 #include "executor/spi.h"
 #include "foreign/foreign.h"
 #include "funcapi.h"
@@ -3724,6 +3723,7 @@ static void
 get_merge_query_def(Query *query, deparse_context *context)
 {
 	StringInfo buf = context->buf;
+	RangeTblEntry *targetRte;
 
 	/* Insert the WITH clause if given */
 	get_with_clause(query, context);
@@ -3731,7 +3731,7 @@ get_merge_query_def(Query *query, deparse_context *context)
 	/*
 	 * Start the query with MERGE INTO <target>
 	 */
-	RangeTblEntry *targetRte = ExtractResultRelationRTE(query);
+	targetRte = rt_fetch(query->resultRelation, query->rtable);
 
 	if (PRETTY_INDENT(context))
 	{
@@ -3853,15 +3853,6 @@ get_merge_query_def(Query *query, deparse_context *context)
 		}
 	}
 
-	/*
-	 * RETURNING is not supported in MERGE, so it must be NULL, but if PG adds it later
-	 * we might miss it, let's raise an exception to investigate.
-	 */
-	if (unlikely(query->returningList))
-	{
-		elog(ERROR, "Unexpected RETURNING clause in MERGE");
-	}
-
 	ereport(DEBUG1, (errmsg("<Deparsed MERGE query: %s>", buf->data)));
 }


@@ -861,11 +861,6 @@ AlterTableConstraintCheck(QueryDesc *queryDesc)
 		return false;
 	}
 
-	if (SkipConstraintValidation)
-	{
-		return true;
-	}
-
 	/*
 	 * While an ALTER TABLE is in progress, we might do SELECTs on some
 	 * catalog tables too. For example, when dropping a column, citus_drop_trigger()


@@ -1936,8 +1936,11 @@ RebalanceTableShardsBackground(RebalanceOptions *options, Oid shardReplicationMo
 	List *referenceTableIdList = NIL;

 	if (HasNodesWithMissingReferenceTables(&referenceTableIdList))
+	{
+		if (shardTransferMode == TRANSFER_MODE_AUTOMATIC)
 		{
 			VerifyTablesHaveReplicaIdentity(referenceTableIdList);
 		}

 		/*
 		 * Reference tables need to be copied to (newly-added) nodes, this needs to be the

================================================================

@@ -338,6 +338,10 @@ FindCitusExtradataContainerRTE(Node *node, RangeTblEntry **result)
 	{
 		RangeTblFunction *rangeTblFunction = (RangeTblFunction *) linitial(
 			rangeTblEntry->functions);
+		if (!IsA(rangeTblFunction->funcexpr, FuncExpr))
+		{
+			return false;
+		}
 		FuncExpr *funcExpr = castNode(FuncExpr, rangeTblFunction->funcexpr);
 		if (funcExpr->funcid == CitusExtraDataContainerFuncId())
 		{

================================================================

@@ -75,6 +75,9 @@ static uint64 NextPlanId = 1;
 /* keep track of planner call stack levels */
 int PlannerLevel = 0;

+static void ErrorIfQueryHasUnsupportedMergeCommand(Query *queryTree,
+												   List *rangeTableList);
+static bool ContainsMergeCommandWalker(Node *node);
 static bool ListContainsDistributedTableRTE(List *rangeTableList,
 											bool *maybeHasForeignDistributedTable);
 static bool IsUpdateOrDelete(Query *query);

@@ -129,7 +132,7 @@ static PlannedStmt * PlanDistributedStmt(DistributedPlanningContext *planContext
 static RTEListProperties * GetRTEListProperties(List *rangeTableList);
 static List * TranslatedVars(PlannerInfo *root, int relationIndex);
 static void WarnIfListHasForeignDistributedTable(List *rangeTableList);
+static void ErrorIfMergeHasUnsupportedTables(Query *parse, List *rangeTableList);

 /* Distributed planner hook */
 PlannedStmt *
@@ -197,6 +200,12 @@ distributed_planner(Query *parse,
 	if (!fastPathRouterQuery)
 	{
+		/*
+		 * Fast path queries cannot have a MERGE command; we check the
+		 * remaining queries here.
+		 */
+		ErrorIfQueryHasUnsupportedMergeCommand(parse, rangeTableList);
+
 		/*
 		 * When there are partitioned tables (not applicable to fast path),
 		 * pretend that they are regular tables to avoid unnecessary work
@@ -295,11 +304,44 @@ distributed_planner(Query *parse,
 }

+/*
+ * ErrorIfQueryHasUnsupportedMergeCommand walks over the query tree and returns
+ * immediately if there is no MERGE command (i.e., CMD_MERGE) in it. For MERGE,
+ * it looks for all supported combinations and throws an exception if any
+ * violation is seen.
+ */
+static void
+ErrorIfQueryHasUnsupportedMergeCommand(Query *queryTree, List *rangeTableList)
+{
+	/*
+	 * Postgres currently doesn't support MERGE queries inside subqueries and
+	 * CTEs, but let's be defensive and do the query tree walk anyway.
+	 *
+	 * We do not call this path for fast-path queries to avoid this additional
+	 * overhead.
+	 */
+	if (!ContainsMergeCommandWalker((Node *) queryTree))
+	{
+		/* no MERGE found */
+		return;
+	}
+
+	/*
+	 * In Citus we have limited support for MERGE; it's allowed only if all
+	 * the tables (target, source, or any CTE) are local, i.e., a combination
+	 * of Citus local and non-Citus tables (regular Postgres tables).
+	 */
+	ErrorIfMergeHasUnsupportedTables(queryTree, rangeTableList);
+}
+
 /*
  * ContainsMergeCommandWalker walks over the node and finds if there are any
  * Merge command (e.g., CMD_MERGE) in the node.
  */
-bool
+static bool
 ContainsMergeCommandWalker(Node *node)
 {
 #if PG_VERSION_NUM < PG_VERSION_15
@@ -634,8 +676,7 @@ bool
 IsUpdateOrDelete(Query *query)
 {
 	return query->commandType == CMD_UPDATE ||
-		   query->commandType == CMD_DELETE ||
-		   query->commandType == CMD_MERGE;
+		   query->commandType == CMD_DELETE;
 }
@@ -749,8 +790,11 @@ CreateDistributedPlannedStmt(DistributedPlanningContext *planContext)
 		hasUnresolvedParams = true;
 	}

+	bool allowRecursivePlanning = true;
 	DistributedPlan *distributedPlan =
-		CreateDistributedPlan(planId, planContext->originalQuery, planContext->query,
+		CreateDistributedPlan(planId, allowRecursivePlanning,
+							  planContext->originalQuery,
+							  planContext->query,
 							  planContext->boundParams,
 							  hasUnresolvedParams,
 							  planContext->plannerRestrictionContext);

@@ -921,8 +965,8 @@ TryCreateDistributedPlannedStmt(PlannedStmt *localPlan,
  * 3. Logical planner
  */
 DistributedPlan *
-CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamListInfo
-					  boundParams, bool hasUnresolvedParams,
+CreateDistributedPlan(uint64 planId, bool allowRecursivePlanning, Query *originalQuery,
+					  Query *query, ParamListInfo boundParams, bool hasUnresolvedParams,
 					  PlannerRestrictionContext *plannerRestrictionContext)
 {
@@ -1060,6 +1104,21 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi
 	 */
 	if (list_length(subPlanList) > 0 || hasCtes)
 	{
+		/*
+		 * The recursive planner should handle the whole tree in a single
+		 * bottom-up pass, i.e., it should have already recursively planned
+		 * all required parts in its first pass. Hence, we expect
+		 * allowRecursivePlanning to be true. Otherwise, we have a bug in the
+		 * recursive planner that needs to be handled; we add a check here
+		 * and return an error.
+		 */
+		if (!allowRecursivePlanning)
+		{
+			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+							errmsg("recursive complex joins are only supported "
+								   "when all distributed tables are co-located and "
+								   "joined on their distribution columns")));
+		}
+
 		Query *newQuery = copyObject(originalQuery);
 		bool setPartitionedTablesInherited = false;
 		PlannerRestrictionContext *currentPlannerRestrictionContext =

@@ -1088,8 +1147,14 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi
 		/* overwrite the old transformed query with the new transformed query */
 		*query = *newQuery;

-		/* recurse into CreateDistributedPlan with subqueries/CTEs replaced */
-		distributedPlan = CreateDistributedPlan(planId, originalQuery, query, NULL, false,
+		/*
+		 * Recurse into CreateDistributedPlan with subqueries/CTEs replaced.
+		 * We only allow recursive planning once; it should have already done
+		 * all the necessary transformations, so we do not allow it again.
+		 */
+		allowRecursivePlanning = false;
+		distributedPlan = CreateDistributedPlan(planId, allowRecursivePlanning,
+												originalQuery, query, NULL, false,
 												plannerRestrictionContext);

 		/* distributedPlan cannot be null since hasUnresolvedParams argument was false */
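As a hedged sketch of the scenario this guard covers (d1 and d2 are hypothetical distributed tables that are not co-located), the CTE below is planned recursively once and replaced with an intermediate result; if the rewritten query were to require recursive planning again, the check above errors out rather than recursing:

    WITH recent AS MATERIALIZED (
        SELECT id FROM d1 ORDER BY created_at DESC LIMIT 10
    )
    SELECT * FROM recent JOIN d2 ON (recent.id = d2.id);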
@@ -2546,3 +2611,148 @@ WarnIfListHasForeignDistributedTable(List *rangeTableList)
 		}
 	}
 }

+/*
+ * IsMergeAllowedOnRelation takes a relation entry and checks if the MERGE
+ * command is permitted on special relations, such as materialized views;
+ * it returns true only if the relation is a "source" relation.
+ */
+bool
+IsMergeAllowedOnRelation(Query *parse, RangeTblEntry *rte)
+{
+	if (!IsMergeQuery(parse))
+	{
+		return false;
+	}
+
+	RangeTblEntry *targetRte = rt_fetch(parse->resultRelation, parse->rtable);
+
+	/* Is it a target relation? */
+	if (targetRte->relid == rte->relid)
+	{
+		return false;
+	}
+
+	return true;
+}
+
+
+/*
+ * ErrorIfMergeHasUnsupportedTables checks if all the tables (target, source, or
+ * any CTE present) in the MERGE command are local, i.e., a combination of Citus
+ * local and non-Citus tables (regular Postgres tables); it raises an exception
+ * for all other combinations.
+ */
+static void
+ErrorIfMergeHasUnsupportedTables(Query *parse, List *rangeTableList)
+{
+	ListCell *tableCell = NULL;
+
+	foreach(tableCell, rangeTableList)
+	{
+		RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(tableCell);
+		Oid relationId = rangeTableEntry->relid;
+
+		switch (rangeTableEntry->rtekind)
+		{
+			case RTE_RELATION:
+			{
+				/* Check the relation type */
+				break;
+			}
+
+			case RTE_SUBQUERY:
+			case RTE_FUNCTION:
+			case RTE_TABLEFUNC:
+			case RTE_VALUES:
+			case RTE_JOIN:
+			case RTE_CTE:
+			{
+				/* Skip them as base table(s) will be checked */
+				continue;
+			}
+
+			/*
+			 * RTE_NAMEDTUPLESTORE is typically used in ephemeral named relations,
+			 * such as trigger data; until we find a genuine use case, raise an
+			 * exception.
+			 * RTE_RESULT is a node added by the planner and we shouldn't
+			 * encounter it in the parse tree.
+			 */
+			case RTE_NAMEDTUPLESTORE:
+			case RTE_RESULT:
+			{
+				ereport(ERROR,
+						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						 errmsg("MERGE command is not supported with "
+								"Tuplestores and results")));
+				break;
+			}
+
+			default:
+			{
+				ereport(ERROR,
+						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						 errmsg("MERGE command: Unrecognized range table entry.")));
+			}
+		}
+
+		/* RTE Relation can be of various types, check them now */
+
+		/* skip the regular views as they are replaced with subqueries */
+		if (rangeTableEntry->relkind == RELKIND_VIEW)
+		{
+			continue;
+		}
+
+		if (rangeTableEntry->relkind == RELKIND_MATVIEW ||
+			rangeTableEntry->relkind == RELKIND_FOREIGN_TABLE)
+		{
+			/* Materialized view or foreign table as target is not allowed */
+			if (IsMergeAllowedOnRelation(parse, rangeTableEntry))
+			{
+				/* Non-target relation is ok */
+				continue;
+			}
+			else
+			{
+				ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+								errmsg("MERGE command is not allowed "
+									   "on materialized view")));
+			}
+		}
+
+		if (rangeTableEntry->relkind != RELKIND_RELATION &&
+			rangeTableEntry->relkind != RELKIND_PARTITIONED_TABLE)
+		{
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("Unexpected relation type(relkind:%c) in MERGE command",
+							rangeTableEntry->relkind)));
+		}
+
+		Assert(rangeTableEntry->relid != 0);
+
+		/* Distributed tables and reference tables are not supported yet */
+		if (IsCitusTableType(relationId, REFERENCE_TABLE) ||
+			IsCitusTableType(relationId, DISTRIBUTED_TABLE))
+		{
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("MERGE command is not supported on "
+							"distributed/reference tables yet")));
+		}
+
+		/* Regular Postgres tables and Citus local tables are allowed */
+		if (!IsCitusTable(relationId) ||
+			IsCitusTableType(relationId, CITUS_LOCAL_TABLE))
+		{
+			continue;
+		}
+
+		/* Any other Citus table type missing? */
+	}
+
+	/* All the tables are local, supported */
+}

================================================================

@@ -54,6 +54,8 @@
 bool EnableFastPathRouterPlanner = true;

 static bool ColumnAppearsMultipleTimes(Node *quals, Var *distributionKey);
+static bool ConjunctionContainsColumnFilter(Node *node, Var *column,
+											Node **distributionKeyValue);
 static bool DistKeyInSimpleOpExpression(Expr *clause, Var *distColumn,
 										Node **distributionKeyValue);

@@ -292,7 +294,7 @@ ColumnAppearsMultipleTimes(Node *quals, Var *distributionKey)
  *
  * If the conjunction contains a column filter which is const, distributionKeyValue is set.
  */
-bool
+static bool
 ConjunctionContainsColumnFilter(Node *node, Var *column, Node **distributionKeyValue)
 {
 	if (node == NULL)

================================================================

@@ -384,7 +384,9 @@ CreateInsertSelectIntoLocalTablePlan(uint64 planId, Query *insertSelectQuery,
 	/* get the SELECT query (may have changed after PrepareInsertSelectForCitusPlanner) */
 	Query *selectQuery = selectRte->subquery;

-	DistributedPlan *distPlan = CreateDistributedPlan(planId, selectQuery,
+	bool allowRecursivePlanning = true;
+	DistributedPlan *distPlan = CreateDistributedPlan(planId, allowRecursivePlanning,
+													  selectQuery,
 													  copyObject(selectQuery),
 													  boundParams, hasUnresolvedParams,
 													  plannerRestrictionContext);

================================================================

@@ -164,7 +164,6 @@ static uint32 HashPartitionCount(void);
 static Job * BuildJobTreeTaskList(Job *jobTree,
 								  PlannerRestrictionContext *plannerRestrictionContext);
 static bool IsInnerTableOfOuterJoin(RelationRestriction *relationRestriction);
-static bool IsOuterTableOfOuterJoin(RelationRestriction *relationRestriction);
 static void ErrorIfUnsupportedShardDistribution(Query *query);
 static Task * QueryPushdownTaskCreate(Query *originalQuery, int shardIndex,
 									  RelationRestrictionContext *restrictionContext,

@@ -2226,32 +2225,17 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId,
 		}

 		/*
-		 * Skip adding shards of non-target (outer)relations.
-		 * Note: This is a stop-gap arrangement for phase-I where in sql
-		 * generates a single task on the shard identified by constant
-		 * qual(filter) on the target relation.
-		 */
-		if (IsMergeQuery(query) &&
-			IsOuterTableOfOuterJoin(relationRestriction))
-		{
-			continue;
-		}
-		else if (!IsMergeQuery(query) &&
-				 IsInnerTableOfOuterJoin(relationRestriction))
-		{
-			/*
-			 * For left joins we don't care about the shards pruned for
-			 * the right hand side. If the right hand side would prune
-			 * to a smaller set we should still send it to all tables
-			 * of the left hand side. However if the right hand side is
-			 * bigger than the left hand side we don't have to send the
-			 * query to any shard that is not matching anything on the
-			 * left hand side.
-			 *
-			 * Instead we will simply skip any RelationRestriction if it
-			 * is an OUTER join and the table is part of the non-outer
-			 * side of the join.
-			 */
+		 * For left joins we don't care about the shards pruned for the right hand side.
+		 * If the right hand side would prune to a smaller set we should still send it to
+		 * all tables of the left hand side. However if the right hand side is bigger than
+		 * the left hand side we don't have to send the query to any shard that is not
+		 * matching anything on the left hand side.
+		 *
+		 * Instead we will simply skip any RelationRestriction if it is an OUTER join and
+		 * the table is part of the non-outer side of the join.
+		 */
+		if (IsInnerTableOfOuterJoin(relationRestriction))
+		{
 			continue;
 		}

@@ -2318,45 +2302,6 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId,
 }

-/*
- * IsOuterTableOfOuterJoin tests based on the join information encoded in a
- * RelationRestriction if the table accessed for this relation is
- * a) in an outer join
- * b) on the outer part of said join
- *
- * The function returns true only if both conditions above hold true
- */
-static bool
-IsOuterTableOfOuterJoin(RelationRestriction *relationRestriction)
-{
-	RestrictInfo *joinInfo = NULL;
-	foreach_ptr(joinInfo, relationRestriction->relOptInfo->joininfo)
-	{
-		if (joinInfo->outer_relids == NULL)
-		{
-			/* not an outer join */
-			continue;
-		}
-
-		/*
-		 * This join restriction info describes an outer join, we need to figure out if
-		 * our table is in the outer part of this join. If that is the case this is a
-		 * outer table of an outer join.
-		 */
-		bool isInOuter = bms_is_member(relationRestriction->relOptInfo->relid,
-									   joinInfo->outer_relids);
-		if (isInOuter)
-		{
-			/* this table is joined in the outer part of an outer join */
-			return true;
-		}
-	}
-
-	/* we have not found any join clause that satisfies both requirements */
-	return false;
-}

 /*
  * IsInnerTableOfOuterJoin tests based on the join information encoded in a
  * RelationRestriction if the table accessed for this relation is
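The retained rule can be read off a plain outer join; assuming hypothetical co-located distributed tables events and clicks:

    SELECT e.id, c.url
    FROM events e
    LEFT JOIN clicks c ON (e.id = c.event_id AND c.country = 'NL');

Shards pruned on the inner side (clicks) do not shrink the task list: every shard of the outer side (events) still has to produce its rows, so the planner simply skips the inner-side RelationRestriction.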

================================================================

@@ -121,6 +121,7 @@ static void CreateSingleTaskRouterSelectPlan(DistributedPlan *distributedPlan,
 											 Query *query,
 											 PlannerRestrictionContext *
 											 plannerRestrictionContext);
+static Oid ResultRelationOidForQuery(Query *query);
 static bool IsTidColumn(Node *node);
 static DeferredErrorMessage * ModifyPartialQuerySupported(Query *queryTree, bool
 														  multiShardQuery,
@@ -179,24 +180,6 @@ static void ReorderTaskPlacementsByTaskAssignmentPolicy(Job *job,
 static bool ModifiesLocalTableWithRemoteCitusLocalTable(List *rangeTableList);
 static DeferredErrorMessage * DeferErrorIfUnsupportedLocalTableJoin(List *rangeTableList);
 static bool IsLocallyAccessibleCitusLocalTable(Oid relationId);
-static bool QueryHasMergeCommand(Query *queryTree);
-static DeferredErrorMessage * MergeQuerySupported(Query *originalQuery,
-												  PlannerRestrictionContext *
-												  plannerRestrictionContext);
-static DeferredErrorMessage * ErrorIfMergeHasUnsupportedTables(Query *parse,
-															   List *rangeTableList,
-															   PlannerRestrictionContext *
-															   restrictionContext);
-static DeferredErrorMessage * ErrorIfDistTablesNotColocated(Query *parse,
-															List *distTablesList,
-															PlannerRestrictionContext *
-															plannerRestrictionContext);
-static DeferredErrorMessage * TargetlistAndFunctionsSupported(Oid resultRelationId,
-															  FromExpr *joinTree,
-															  Node *quals,
-															  List *targetList,
-															  CmdType commandType,
-															  List *returningList);

 /*
@@ -462,7 +445,7 @@ ModifyQueryResultRelationId(Query *query)
 * ResultRelationOidForQuery returns the OID of the relation that is modified
 * by a given query.
 */
-Oid
+static Oid
 ResultRelationOidForQuery(Query *query)
 {
 	RangeTblEntry *resultRTE = rt_fetch(query->resultRelation, query->rtable);
@@ -529,161 +512,6 @@ IsTidColumn(Node *node)
 }

-/*
- * TargetlistAndFunctionsSupported implements a subset of what ModifyPartialQuerySupported
- * checks, that subset being checking what functions are allowed, if we are
- * updating distribution column, etc.
- * Note: This subset of checks are repeated for each MERGE modify action.
- */
-static DeferredErrorMessage *
-TargetlistAndFunctionsSupported(Oid resultRelationId, FromExpr *joinTree, Node *quals,
-								List *targetList,
-								CmdType commandType, List *returningList)
-{
-	uint32 rangeTableId = 1;
-	Var *partitionColumn = NULL;
-
-	if (IsCitusTable(resultRelationId))
-	{
-		partitionColumn = PartitionColumn(resultRelationId, rangeTableId);
-	}
-
-	bool hasVarArgument = false; /* A STABLE function is passed a Var argument */
-	bool hasBadCoalesce = false; /* CASE/COALESCE passed a mutable function */
-	ListCell *targetEntryCell = NULL;
-
-	foreach(targetEntryCell, targetList)
-	{
-		TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell);
-
-		/* skip resjunk entries: UPDATE adds some for ctid, etc. */
-		if (targetEntry->resjunk)
-		{
-			continue;
-		}
-
-		bool targetEntryPartitionColumn = false;
-		AttrNumber targetColumnAttrNumber = InvalidAttrNumber;
-
-		/* reference tables do not have partition column */
-		if (partitionColumn == NULL)
-		{
-			targetEntryPartitionColumn = false;
-		}
-		else
-		{
-			if (commandType == CMD_UPDATE)
-			{
-				/*
-				 * Note that it is not possible to give an alias to
-				 * UPDATE table SET ...
-				 */
-				if (targetEntry->resname)
-				{
-					targetColumnAttrNumber = get_attnum(resultRelationId,
-														targetEntry->resname);
-					if (targetColumnAttrNumber == partitionColumn->varattno)
-					{
-						targetEntryPartitionColumn = true;
-					}
-				}
-			}
-		}
-
-		if (commandType == CMD_UPDATE &&
-			FindNodeMatchingCheckFunction((Node *) targetEntry->expr,
-										  CitusIsVolatileFunction))
-		{
-			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-								 "functions used in UPDATE queries on distributed "
-								 "tables must not be VOLATILE",
-								 NULL, NULL);
-		}
-
-		if (commandType == CMD_UPDATE && targetEntryPartitionColumn &&
-			TargetEntryChangesValue(targetEntry, partitionColumn,
-									joinTree))
-		{
-			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-								 "modifying the partition value of rows is not "
-								 "allowed",
-								 NULL, NULL);
-		}
-
-		if (commandType == CMD_UPDATE &&
-			MasterIrreducibleExpression((Node *) targetEntry->expr,
-										&hasVarArgument, &hasBadCoalesce))
-		{
-			Assert(hasVarArgument || hasBadCoalesce);
-		}
-
-		if (FindNodeMatchingCheckFunction((Node *) targetEntry->expr,
-										  NodeIsFieldStore))
-		{
-			/* DELETE cannot do field indirection already */
-			Assert(commandType == CMD_UPDATE || commandType == CMD_INSERT);
-			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-								 "inserting or modifying composite type fields is not "
-								 "supported", NULL,
-								 "Use the column name to insert or update the composite "
-								 "type as a single value");
-		}
-	}
-
-	if (joinTree != NULL)
-	{
-		if (FindNodeMatchingCheckFunction((Node *) quals,
-										  CitusIsVolatileFunction))
-		{
-			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-								 "functions used in the WHERE/ON/WHEN clause of modification "
-								 "queries on distributed tables must not be VOLATILE",
-								 NULL, NULL);
-		}
-		else if (MasterIrreducibleExpression(quals, &hasVarArgument,
-											 &hasBadCoalesce))
-		{
-			Assert(hasVarArgument || hasBadCoalesce);
-		}
-	}
-
-	if (hasVarArgument)
-	{
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "STABLE functions used in UPDATE queries "
-							 "cannot be called with column references",
-							 NULL, NULL);
-	}
-
-	if (hasBadCoalesce)
-	{
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "non-IMMUTABLE functions are not allowed in CASE or "
-							 "COALESCE statements",
-							 NULL, NULL);
-	}
-
-	if (contain_mutable_functions((Node *) returningList))
-	{
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "non-IMMUTABLE functions are not allowed in the "
-							 "RETURNING clause",
-							 NULL, NULL);
-	}
-
-	if (quals != NULL &&
-		nodeTag(quals) == T_CurrentOfExpr)
-	{
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "cannot run DML queries with cursors", NULL,
-							 NULL);
-	}
-
-	return NULL;
-}

 /*
  * ModifyPartialQuerySupported implements a subset of what ModifyQuerySupported checks,
  * that subset being what's necessary to check modifying CTEs for.
@@ -792,21 +620,148 @@ ModifyPartialQuerySupported(Query *queryTree, bool multiShardQuery,
 	Oid resultRelationId = ModifyQueryResultRelationId(queryTree);
 	*distributedTableIdOutput = resultRelationId;

+	uint32 rangeTableId = 1;
+	Var *partitionColumn = NULL;
+	if (IsCitusTable(resultRelationId))
+	{
+		partitionColumn = PartitionColumn(resultRelationId, rangeTableId);
+	}
+
 	commandType = queryTree->commandType;
 	if (commandType == CMD_INSERT || commandType == CMD_UPDATE ||
 		commandType == CMD_DELETE)
 	{
-		deferredError =
-			TargetlistAndFunctionsSupported(resultRelationId,
-											queryTree->jointree,
-											queryTree->jointree->quals,
-											queryTree->targetList,
-											commandType,
-											queryTree->returningList);
-		if (deferredError)
-		{
-			return deferredError;
-		}
+		bool hasVarArgument = false; /* A STABLE function is passed a Var argument */
+		bool hasBadCoalesce = false; /* CASE/COALESCE passed a mutable function */
+		FromExpr *joinTree = queryTree->jointree;
+		ListCell *targetEntryCell = NULL;
+
+		foreach(targetEntryCell, queryTree->targetList)
+		{
+			TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell);
+
+			/* skip resjunk entries: UPDATE adds some for ctid, etc. */
+			if (targetEntry->resjunk)
+			{
+				continue;
+			}
+
+			bool targetEntryPartitionColumn = false;
+			AttrNumber targetColumnAttrNumber = InvalidAttrNumber;
+
+			/* reference tables do not have partition column */
+			if (partitionColumn == NULL)
+			{
+				targetEntryPartitionColumn = false;
+			}
+			else
+			{
+				if (commandType == CMD_UPDATE)
+				{
+					/*
+					 * Note that it is not possible to give an alias to
+					 * UPDATE table SET ...
+					 */
+					if (targetEntry->resname)
+					{
+						targetColumnAttrNumber = get_attnum(resultRelationId,
+															targetEntry->resname);
+						if (targetColumnAttrNumber == partitionColumn->varattno)
+						{
+							targetEntryPartitionColumn = true;
+						}
+					}
+				}
+			}
+
+			if (commandType == CMD_UPDATE &&
+				FindNodeMatchingCheckFunction((Node *) targetEntry->expr,
+											  CitusIsVolatileFunction))
+			{
+				return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+									 "functions used in UPDATE queries on distributed "
+									 "tables must not be VOLATILE",
+									 NULL, NULL);
+			}
+
+			if (commandType == CMD_UPDATE && targetEntryPartitionColumn &&
+				TargetEntryChangesValue(targetEntry, partitionColumn,
+										queryTree->jointree))
+			{
+				return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+									 "modifying the partition value of rows is not "
+									 "allowed",
+									 NULL, NULL);
+			}
+
+			if (commandType == CMD_UPDATE &&
+				MasterIrreducibleExpression((Node *) targetEntry->expr,
+											&hasVarArgument, &hasBadCoalesce))
+			{
+				Assert(hasVarArgument || hasBadCoalesce);
+			}
+
+			if (FindNodeMatchingCheckFunction((Node *) targetEntry->expr,
+											  NodeIsFieldStore))
+			{
+				/* DELETE cannot do field indirection already */
+				Assert(commandType == CMD_UPDATE || commandType == CMD_INSERT);
+				return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+									 "inserting or modifying composite type fields is not "
+									 "supported", NULL,
+									 "Use the column name to insert or update the composite "
+									 "type as a single value");
+			}
+		}
+
+		if (joinTree != NULL)
+		{
+			if (FindNodeMatchingCheckFunction((Node *) joinTree->quals,
+											  CitusIsVolatileFunction))
+			{
+				return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+									 "functions used in the WHERE clause of modification "
+									 "queries on distributed tables must not be VOLATILE",
+									 NULL, NULL);
+			}
+			else if (MasterIrreducibleExpression(joinTree->quals, &hasVarArgument,
+												 &hasBadCoalesce))
+			{
+				Assert(hasVarArgument || hasBadCoalesce);
+			}
+		}
+
+		if (hasVarArgument)
+		{
+			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+								 "STABLE functions used in UPDATE queries "
+								 "cannot be called with column references",
+								 NULL, NULL);
+		}
+
+		if (hasBadCoalesce)
+		{
+			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+								 "non-IMMUTABLE functions are not allowed in CASE or "
+								 "COALESCE statements",
+								 NULL, NULL);
+		}
+
+		if (contain_mutable_functions((Node *) queryTree->returningList))
+		{
+			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+								 "non-IMMUTABLE functions are not allowed in the "
+								 "RETURNING clause",
+								 NULL, NULL);
+		}
+
+		if (queryTree->jointree->quals != NULL &&
+			nodeTag(queryTree->jointree->quals) == T_CurrentOfExpr)
+		{
+			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+								 "cannot run DML queries with cursors", NULL,
+								 NULL);
+		}
 	}
@@ -918,85 +873,6 @@ NodeIsFieldStore(Node *node)
 }

-/*
- * MergeQuerySupported does check for a MERGE command in the query, if it finds
- * one, it will verify the below criteria
- * - Supported tables and combinations in ErrorIfMergeHasUnsupportedTables
- * - Distributed tables requirements in ErrorIfDistTablesNotColocated
- * - Checks target-lists and functions-in-quals in TargetlistAndFunctionsSupported
- */
-static DeferredErrorMessage *
-MergeQuerySupported(Query *originalQuery,
-					PlannerRestrictionContext *plannerRestrictionContext)
-{
-	/* For non-MERGE commands it's a no-op */
-	if (!QueryHasMergeCommand(originalQuery))
-	{
-		return NULL;
-	}
-
-	List *rangeTableList = ExtractRangeTableEntryList(originalQuery);
-	RangeTblEntry *resultRte = ExtractResultRelationRTE(originalQuery);
-
-	/*
-	 * Fast path queries cannot have merge command, and we prevent the remaining here.
-	 * In Citus we have limited support for MERGE, it's allowed only if all
-	 * the tables(target, source or any CTE) tables are are local i.e. a
-	 * combination of Citus local and Non-Citus tables (regular Postgres tables)
-	 * or distributed tables with some restrictions, please see header of routine
-	 * ErrorIfDistTablesNotColocated for details.
-	 */
-	DeferredErrorMessage *deferredError =
-		ErrorIfMergeHasUnsupportedTables(originalQuery,
-										 rangeTableList,
-										 plannerRestrictionContext);
-	if (deferredError)
-	{
-		return deferredError;
-	}
-
-	Oid resultRelationId = resultRte->relid;
-	deferredError =
-		TargetlistAndFunctionsSupported(resultRelationId,
-										originalQuery->jointree,
-										originalQuery->jointree->quals,
-										originalQuery->targetList,
-										originalQuery->commandType,
-										originalQuery->returningList);
-	if (deferredError)
-	{
-		return deferredError;
-	}
-
-#if PG_VERSION_NUM >= PG_VERSION_15
-
-	/*
-	 * MERGE is a special case where we have multiple modify statements
-	 * within itself. Check each INSERT/UPDATE/DELETE individually.
-	 */
-	MergeAction *action = NULL;
-	foreach_ptr(action, originalQuery->mergeActionList)
-	{
-		Assert(originalQuery->returningList == NULL);
-		deferredError =
-			TargetlistAndFunctionsSupported(resultRelationId,
-											originalQuery->jointree,
-											action->qual,
-											action->targetList,
-											action->commandType,
-											originalQuery->returningList);
-		if (deferredError)
-		{
-			return deferredError;
-		}
-	}
-
-#endif
-
-	return NULL;
-}

 /*
  * ModifyQuerySupported returns NULL if the query only contains supported
  * features, otherwise it returns an error description.
@@ -1012,17 +888,8 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
 					 PlannerRestrictionContext *plannerRestrictionContext)
 {
 	Oid distributedTableId = InvalidOid;
-	DeferredErrorMessage *error = MergeQuerySupported(originalQuery,
-													  plannerRestrictionContext);
-	if (error)
-	{
-		/*
-		 * For MERGE, we do not do recursive plannning, simply bail out.
-		 */
-		RaiseDeferredError(error, ERROR);
-	}
-
-	error = ModifyPartialQuerySupported(queryTree, multiShardQuery, &distributedTableId);
+	DeferredErrorMessage *error = ModifyPartialQuerySupported(queryTree, multiShardQuery,
+															  &distributedTableId);
 	if (error)
 	{
 		return error;
@@ -4074,288 +3941,3 @@ CompareInsertValuesByShardId(const void *leftElement, const void *rightElement)
 		}
 	}
 }

-/*
- * IsMergeAllowedOnRelation takes a relation entry and checks if MERGE command is
- * permitted on special relations, such as materialized view, returns true only if
- * it's a "source" relation.
- */
-bool
-IsMergeAllowedOnRelation(Query *parse, RangeTblEntry *rte)
-{
-	if (!IsMergeQuery(parse))
-	{
-		return false;
-	}
-
-	RangeTblEntry *targetRte = rt_fetch(parse->resultRelation, parse->rtable);
-
-	/* Is it a target relation? */
-	if (targetRte->relid == rte->relid)
-	{
-		return false;
-	}
-
-	return true;
-}
-
-
-/*
- * ErrorIfDistTablesNotColocated Checks to see if
- *
- *   - There are a minimum of two distributed tables (source and a target).
- *   - All the distributed tables are indeed colocated.
- *   - MERGE relations are joined on the distribution column
- *     MERGE .. USING .. ON target.dist_key = source.dist_key
- *   - The query should touch only a single shard i.e. JOIN AND with a constant qual
- *     MERGE .. USING .. ON target.dist_key = source.dist_key AND target.dist_key = <>
- *
- * If any of the conditions are not met, it raises an exception.
- */
-static DeferredErrorMessage *
-ErrorIfDistTablesNotColocated(Query *parse, List *distTablesList,
-							  PlannerRestrictionContext *plannerRestrictionContext)
-{
-	/* All MERGE tables must be distributed */
-	if (list_length(distTablesList) < 2)
-	{
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "For MERGE command, both the source and target "
-							 "must be distributed", NULL, NULL);
-	}
-
-	/* All distributed tables must be colocated */
-	if (!AllRelationsInListColocated(distTablesList, RANGETABLE_ENTRY))
-	{
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "For MERGE command, all the distributed tables "
-							 "must be colocated", NULL, NULL);
-	}
-
-	/* Are source and target tables joined on distribution column? */
-	if (!RestrictionEquivalenceForPartitionKeys(plannerRestrictionContext))
-	{
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "MERGE command is only supported when distributed "
-							 "tables are joined on their distribution column",
-							 NULL, NULL);
-	}
-
-	/* Look for a constant qual i.e. AND target.dist_key = <> */
-	Node *distributionKeyValue = NULL;
-	Oid targetRelId = ResultRelationOidForQuery(parse);
-	Var *distributionKey = PartitionColumn(targetRelId, 1);
-	Assert(distributionKey);
-
-	/* convert list of expressions into expression tree for further processing */
-	Node *quals = parse->jointree->quals;
-	if (quals && IsA(quals, List))
-	{
-		quals = (Node *) make_ands_explicit((List *) quals);
-	}
-
-	if (!ConjunctionContainsColumnFilter(quals, distributionKey, &distributionKeyValue))
-	{
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "MERGE on a distributed table requires a constant filter "
-							 "on the distribution column of the target table", NULL,
-							 "Consider adding AND target.dist_key = <> to the ON clause");
-	}
-
-	return NULL;
-}
-
-
-/*
- * ErrorIfMergeHasUnsupportedTables checks if all the tables(target, source or any CTE
- * present) in the MERGE command are local i.e. a combination of Citus local and Non-Citus
- * tables (regular Postgres tables), or distributed tables with some restrictions, please
- * see header of routine ErrorIfDistTablesNotColocated for details, raises an exception
- * for all other combinations.
- */
-static DeferredErrorMessage *
-ErrorIfMergeHasUnsupportedTables(Query *parse, List *rangeTableList,
-								 PlannerRestrictionContext *restrictionContext)
-{
-	List *distTablesList = NIL;
-	bool foundLocalTables = false;
-
-	RangeTblEntry *rangeTableEntry = NULL;
-	foreach_ptr(rangeTableEntry, rangeTableList)
-	{
-		Oid relationId = rangeTableEntry->relid;
-
-		switch (rangeTableEntry->rtekind)
-		{
-			case RTE_RELATION:
-			{
-				/* Check the relation type */
-				break;
-			}
-
-			case RTE_SUBQUERY:
-			case RTE_FUNCTION:
-			case RTE_TABLEFUNC:
-			case RTE_VALUES:
-			case RTE_JOIN:
-			case RTE_CTE:
-			{
-				/* Skip them as base table(s) will be checked */
-				continue;
-			}
-
-			/*
-			 * RTE_NAMEDTUPLESTORE is typically used in ephmeral named relations,
-			 * such as, trigger data; until we find a genuine use case, raise an
-			 * exception.
-			 * RTE_RESULT is a node added by the planner and we shouldn't
-			 * encounter it in the parse tree.
-			 */
-			case RTE_NAMEDTUPLESTORE:
-			case RTE_RESULT:
-			{
-				return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-									 "MERGE command is not supported with "
-									 "Tuplestores and results",
-									 NULL, NULL);
-			}
-
-			default:
-			{
-				return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-									 "MERGE command: Unrecognized range table entry.",
-									 NULL, NULL);
-			}
-		}
-
-		/* RTE Relation can be of various types, check them now */
-
-		/* skip the regular views as they are replaced with subqueries */
-		if (rangeTableEntry->relkind == RELKIND_VIEW)
-		{
-			continue;
-		}
-
-		if (rangeTableEntry->relkind == RELKIND_MATVIEW ||
-			rangeTableEntry->relkind == RELKIND_FOREIGN_TABLE)
-		{
-			/* Materialized view or Foreign table as target is not allowed */
-			if (IsMergeAllowedOnRelation(parse, rangeTableEntry))
-			{
-				/* Non target relation is ok */
-				continue;
-			}
-			else
-			{
-				/* Usually we don't reach this exception as the Postgres parser catches it */
-				StringInfo errorMessage = makeStringInfo();
-				appendStringInfo(errorMessage,
-								 "MERGE command is not allowed on "
-								 "relation type(relkind:%c)", rangeTableEntry->relkind);
-				return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, errorMessage->data,
-									 NULL, NULL);
-			}
-		}
-
-		if (rangeTableEntry->relkind != RELKIND_RELATION &&
-			rangeTableEntry->relkind != RELKIND_PARTITIONED_TABLE)
-		{
-			StringInfo errorMessage = makeStringInfo();
-			appendStringInfo(errorMessage, "Unexpected table type(relkind:%c) "
-										   "in MERGE command", rangeTableEntry->relkind);
-			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, errorMessage->data,
-								 NULL, NULL);
-		}
-
-		Assert(rangeTableEntry->relid != 0);
-
-		/* Reference tables are not supported yet */
-		if (IsCitusTableType(relationId, REFERENCE_TABLE))
-		{
-			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-								 "MERGE command is not supported on reference "
-								 "tables yet", NULL, NULL);
-		}
-
-		/* Append/Range tables are not supported */
-		if (IsCitusTableType(relationId, APPEND_DISTRIBUTED) ||
-			IsCitusTableType(relationId, RANGE_DISTRIBUTED))
-		{
-			return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-								 "For MERGE command, all the distributed tables "
-								 "must be colocated, for append/range distribution, "
-								 "colocation is not supported", NULL,
-								 "Consider using hash distribution instead");
-		}
-
-		/*
-		 * For now, save all distributed tables, later (below) we will
-		 * check for supported combination(s).
-		 */
-		if (IsCitusTableType(relationId, DISTRIBUTED_TABLE))
-		{
-			distTablesList = lappend(distTablesList, rangeTableEntry);
-			continue;
-		}
-
-		/* Regular Postgres tables and Citus local tables are allowed */
-		if (!IsCitusTable(relationId) ||
-			IsCitusTableType(relationId, CITUS_LOCAL_TABLE))
-		{
-			foundLocalTables = true;
-			continue;
-		}
-
-		/* Any other Citus table type missing ? */
-	}
-
-	/* Ensure all tables are indeed local */
-	if (foundLocalTables && list_length(distTablesList) == 0)
-	{
-		/* All the tables are local, supported */
-		return NULL;
-	}
-	else if (foundLocalTables && list_length(distTablesList) > 0)
-	{
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "MERGE command is not supported with "
-							 "combination of distributed/local tables yet",
-							 NULL, NULL);
-	}
-
-	/* Ensure all distributed tables are indeed co-located */
-	return ErrorIfDistTablesNotColocated(parse, distTablesList, restrictionContext);
-}
-
-
-/*
- * QueryHasMergeCommand walks over the query tree and returns false if there
- * is no Merge command (e.g., CMD_MERGE), true otherwise.
- */
-static bool
-QueryHasMergeCommand(Query *queryTree)
-{
-	/* function is void for pre-15 versions of Postgres */
-#if PG_VERSION_NUM < PG_VERSION_15
-	return false;
-#else
-
-	/*
-	 * Postgres currently doesn't support Merge queries inside subqueries and
-	 * ctes, but lets be defensive and do query tree walk anyway.
-	 *
-	 * We do not call this path for fast-path queries to avoid this additional
-	 * overhead.
-	 */
-	if (!ContainsMergeCommandWalker((Node *) queryTree))
-	{
-		/* No MERGE found */
-		return false;
-	}
-
-	return true;
-#endif
-}

================================================================

@@ -316,23 +316,6 @@ RecursivelyPlanSubqueriesAndCTEs(Query *query, RecursivePlanningContext *context
 		RecursivelyPlanSetOperations(query, (Node *) query->setOperations, context);
 	}

-	/*
-	 * If the FROM clause is recurring (does not contain a distributed table),
-	 * then we cannot have any distributed tables appearing in subqueries in
-	 * the SELECT and WHERE clauses.
-	 */
-	if (ShouldRecursivelyPlanSublinks(query))
-	{
-		/* replace all subqueries in the WHERE clause */
-		if (query->jointree && query->jointree->quals)
-		{
-			RecursivelyPlanAllSubqueries((Node *) query->jointree->quals, context);
-		}
-
-		/* replace all subqueries in the SELECT clause */
-		RecursivelyPlanAllSubqueries((Node *) query->targetList, context);
-	}
-
 	if (query->havingQual != NULL)
 	{
 		if (NodeContainsSubqueryReferencingOuterQuery(query->havingQual))

@@ -379,6 +362,27 @@ RecursivelyPlanSubqueriesAndCTEs(Query *query, RecursivePlanningContext *context
 												  query, context);
 	}

+	/*
+	 * If the FROM clause is recurring (does not contain a distributed table),
+	 * then we cannot have any distributed tables appearing in subqueries in
+	 * the SELECT and WHERE clauses.
+	 *
+	 * We do the sublink conversions at the end of the recursive planning
+	 * because earlier steps might have transformed the query into a
+	 * shape that needs recursively planning the sublinks.
+	 */
+	if (ShouldRecursivelyPlanSublinks(query))
+	{
+		/* replace all subqueries in the WHERE clause */
+		if (query->jointree && query->jointree->quals)
+		{
+			RecursivelyPlanAllSubqueries((Node *) query->jointree->quals, context);
+		}
+
+		/* replace all subqueries in the SELECT clause */
+		RecursivelyPlanAllSubqueries((Node *) query->targetList, context);
+	}
+
 	return NULL;
 }
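As an example of the kind of query this late pass handles (ref_table and dist_table are hypothetical, with ref_table a reference table that makes the FROM clause recurring):

    SELECT r.key
    FROM ref_table r
    WHERE r.key IN (SELECT dist_key FROM dist_table);

The IN-sublink over the distributed table is replaced with an intermediate result only after the earlier steps have settled the query's final shape.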

================================================================

@@ -151,6 +151,8 @@ static void ListConcatUniqueAttributeClassMemberLists(AttributeEquivalenceClass
 														  secondClass);
 static Var * PartitionKeyForRTEIdentityInQuery(Query *query, int targetRTEIndex,
 											   Index *partitionKeyIndex);
+static bool AllRelationsInRestrictionContextColocated(RelationRestrictionContext *
+													  restrictionContext);
 static bool IsNotSafeRestrictionToRecursivelyPlan(Node *node);
 static JoinRestrictionContext * FilterJoinRestrictionContext(
 	JoinRestrictionContext *joinRestrictionContext, Relids

@@ -381,8 +383,7 @@ SafeToPushdownUnionSubquery(Query *originalQuery,
 		return false;
 	}

-	if (!AllRelationsInListColocated(restrictionContext->relationRestrictionList,
-									 RESTRICTION_CONTEXT))
+	if (!AllRelationsInRestrictionContextColocated(restrictionContext))
 	{
 		/* distribution columns are equal, but tables are not co-located */
 		return false;

@@ -1550,7 +1551,21 @@ AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass *
 	Assert(rangeTableEntry->rtekind == RTE_RELATION);

-	/* we don't need reference tables in the equality on columns */
+	/*
+	 * We only calculate the equivalence of distributed tables.
+	 * This leads to certain shortcomings in query planning when reference
+	 * tables and/or intermediate results are involved in the query. For example,
+	 * the following query patterns could actually be pushed down in a single iteration:
+	 * "(intermediate_res INNER JOIN dist dist1) INNER JOIN dist dist2" or
+	 * "(ref INNER JOIN dist dist1) JOIN dist dist2".
+	 *
+	 * However, if there are no explicit join conditions between distributed tables,
+	 * the planner cannot deduce the equivalence between them.
+	 *
+	 * Instead, we should be able to track all the equivalences between range table
+	 * entries, and expand distributed table equivalences that happen via
+	 * reference tables/intermediate results.
+	 */
 	if (relationPartitionKey == NULL)
 	{
 		return;

@@ -1904,33 +1919,19 @@ FindQueryContainingRTEIdentityInternal(Node *node,

 /*
- * AllRelationsInListColocated determines whether all of the relations in the
- * given list are co-located.
- * Note: The list can be of dofferent types, which is specified by ListEntryType
+ * AllRelationsInRestrictionContextColocated determines whether all of the relations in
+ * the given relation restrictions list are co-located.
 */
-bool
-AllRelationsInListColocated(List *relationList, ListEntryType entryType)
+static bool
+AllRelationsInRestrictionContextColocated(RelationRestrictionContext *restrictionContext)
 {
-	void *varPtr = NULL;
-	RangeTblEntry *rangeTableEntry = NULL;
 	RelationRestriction *relationRestriction = NULL;
 	int initialColocationId = INVALID_COLOCATION_ID;

 	/* check whether all relations exist in the main restriction list */
-	foreach_ptr(varPtr, relationList)
+	foreach_ptr(relationRestriction, restrictionContext->relationRestrictionList)
 	{
-		Oid relationId = InvalidOid;
-
-		if (entryType == RANGETABLE_ENTRY)
-		{
-			rangeTableEntry = (RangeTblEntry *) varPtr;
-			relationId = rangeTableEntry->relid;
-		}
-		else if (entryType == RESTRICTION_CONTEXT)
-		{
-			relationRestriction = (RelationRestriction *) varPtr;
-			relationId = relationRestriction->relationId;
-		}
+		Oid relationId = relationRestriction->relationId;

 		if (IsCitusTableType(relationId, CITUS_TABLE_WITH_NO_DIST_KEY))
 		{
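A sketch of the property the helper verifies, with hypothetical hash-distributed tables d1 and d2:

    SELECT dist_key FROM d1
    UNION
    SELECT dist_key FROM d2;

The UNION can only be pushed down when d1 and d2 are co-located (same colocation id), which is exactly what AllRelationsInRestrictionContextColocated now checks over the relation restriction list.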

================================================================

@@ -177,10 +177,18 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
 					 */
 					if (!PartitionedTable(relationId) ||
 						constraint->contype != CONSTR_CHECK)
 					{
+						/*
+						 * constraint->conname could be empty in the case of
+						 * ADD {PRIMARY KEY, UNIQUE} USING INDEX; in that case,
+						 * the already-extended index name will be used by
+						 * Postgres.
+						 */
+						if (constraint->conname != NULL)
+						{
 							AppendShardIdToName(constraintName, shardId);
+						}
 					}
 				}
 				else if (command->subtype == AT_DropConstraint ||
 						 command->subtype == AT_ValidateConstraint)
 				{
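The unnamed-constraint case the new check covers looks like this (names hypothetical); no constraint name is supplied, so constraint->conname is NULL and Postgres reuses the already shard-extended index name:

    ALTER TABLE dist_table ADD PRIMARY KEY USING INDEX dist_table_key_idx;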

================================================================

@@ -0,0 +1,4 @@
+-- citus--11.2-1--11.3-1
+-- bump version to 11.3-1

================================================================

@@ -0,0 +1,2 @@
+-- citus--11.3-1--11.2-1
+-- this is an empty downgrade path since citus--11.2-1--11.3-1.sql is empty for now
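With these scripts in place, the usual extension upgrade applies; a minimal sketch:

    ALTER EXTENSION citus UPDATE TO '11.3-1';
    -- and back, since the downgrade path is empty for now:
    ALTER EXTENSION citus UPDATE TO '11.2-1';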

================================================================

@@ -74,7 +74,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_job_status (
         WHERE j.job_id = $1
           AND t.status = 'running'
     ),
-    errored_task_details AS (
+    errored_or_retried_task_details AS (
         SELECT jsonb_agg(jsonb_build_object(
             'state', t.status,
             'retried', coalesce(t.retry_count,0),

@@ -85,7 +85,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_job_status (
             pg_dist_background_task t JOIN pg_dist_background_job j ON t.job_id = j.job_id
         WHERE j.job_id = $1
           AND NOT EXISTS (SELECT 1 FROM rp WHERE rp.sessionid = t.pid)
-          AND t.status = 'error'
+          AND (t.status = 'error' OR (t.status = 'runnable' AND t.retry_count > 0))
     )
     SELECT
         job_id,

@@ -97,7 +97,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_job_status (
         jsonb_build_object(
             'task_state_counts', (SELECT jsonb_object_agg(status, count) FROM task_state_occurence_counts),
             'tasks', (COALESCE((SELECT tasks FROM running_task_details),'[]'::jsonb) ||
-                      COALESCE((SELECT tasks FROM errored_task_details),'[]'::jsonb))) AS details
+                      COALESCE((SELECT tasks FROM errored_or_retried_task_details),'[]'::jsonb))) AS details
     FROM pg_dist_background_job j
     WHERE j.job_id = $1
 $fn$;

================================================================

@@ -74,7 +74,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_job_status (
         WHERE j.job_id = $1
           AND t.status = 'running'
     ),
-    errored_task_details AS (
+    errored_or_retried_task_details AS (
         SELECT jsonb_agg(jsonb_build_object(
             'state', t.status,
             'retried', coalesce(t.retry_count,0),

@@ -85,7 +85,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_job_status (
             pg_dist_background_task t JOIN pg_dist_background_job j ON t.job_id = j.job_id
         WHERE j.job_id = $1
           AND NOT EXISTS (SELECT 1 FROM rp WHERE rp.sessionid = t.pid)
-          AND t.status = 'error'
+          AND (t.status = 'error' OR (t.status = 'runnable' AND t.retry_count > 0))
     )
     SELECT
         job_id,

@@ -97,7 +97,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_job_status (
         jsonb_build_object(
             'task_state_counts', (SELECT jsonb_object_agg(status, count) FROM task_state_occurence_counts),
             'tasks', (COALESCE((SELECT tasks FROM running_task_details),'[]'::jsonb) ||
-                      COALESCE((SELECT tasks FROM errored_task_details),'[]'::jsonb))) AS details
+                      COALESCE((SELECT tasks FROM errored_or_retried_task_details),'[]'::jsonb))) AS details
     FROM pg_dist_background_job j
     WHERE j.job_id = $1
 $fn$;
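A hedged usage sketch (the job id 42 is hypothetical): with this change, the details column lists retried-but-runnable tasks alongside errored ones.

    SELECT job_id, state, details
    FROM pg_catalog.citus_job_status(42);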

================================================================

@@ -80,6 +80,7 @@ StartRemoteTransactionBegin(struct MultiConnection *connection)
 	/* remember transaction as being in-progress */
 	dlist_push_tail(&InProgressTransactions, &connection->transactionNode);
+	connection->transactionInProgress = true;

 	transaction->transactionState = REMOTE_TRANS_STARTING;

@@ -865,11 +866,13 @@ ResetRemoteTransaction(struct MultiConnection *connection)
 	RemoteTransaction *transaction = &connection->remoteTransaction;

 	/* unlink from list of open transactions, if necessary */
-	if (transaction->transactionState != REMOTE_TRANS_NOT_STARTED)
+	if (connection->transactionInProgress)
 	{
 		/* XXX: Should we error out for a critical transaction? */
 		dlist_delete(&connection->transactionNode);
+
+		connection->transactionInProgress = false;
+		memset(&connection->transactionNode, 0, sizeof(connection->transactionNode));
 	}

 	/* just reset the entire state, relying on 0 being invalid/false */

================================================================

@@ -287,7 +287,6 @@ extern bool TableHasExternalForeignKeys(Oid relationId);
 extern List * GetForeignKeyOids(Oid relationId, int flags);
 extern Oid GetReferencedTableId(Oid foreignKeyId);
 extern Oid GetReferencingTableId(Oid foreignKeyId);
-extern void EnableSkippingConstraintValidation(void);
 extern bool RelationInvolvedInAnyNonInheritedForeignKeys(Oid relationId);

@@ -547,7 +546,8 @@ extern List * PreprocessAlterTableMoveAllStmt(Node *node, const char *queryStrin
 											  ProcessUtilityContext processUtilityContext);
 extern List * PreprocessAlterTableSchemaStmt(Node *node, const char *queryString,
 											 ProcessUtilityContext processUtilityContext);
-extern void SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStmt);
+extern void SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStmt,
+													   bool processLocalRelation);
 extern bool IsAlterTableRenameStmt(RenameStmt *renameStmt);
 extern void ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement);
 extern void PostprocessAlterTableStmt(AlterTableStmt *pStmt);

================================================================

@@ -189,8 +189,12 @@ typedef struct MultiConnection
 	/* information about the associated remote transaction */
 	RemoteTransaction remoteTransaction;

-	/* membership in list of in-progress transactions */
+	/*
+	 * membership in list of in-progress transactions and a flag to indicate
+	 * that the connection was added to this list
+	 */
 	dlist_node transactionNode;
+	bool transactionInProgress;

 	/* list of all placements referenced by this connection */
 	dlist_head referencedPlacements;

================================================================

@@ -246,17 +246,15 @@ extern PlannedStmt * FinalizePlan(PlannedStmt *localPlan,
 extern RTEListProperties * GetRTEListPropertiesForQuery(Query *query);

-extern struct DistributedPlan * CreateDistributedPlan(uint64 planId, Query *originalQuery,
-													   Query *query, ParamListInfo
-													   boundParams, bool
-													   hasUnresolvedParams,
+extern struct DistributedPlan * CreateDistributedPlan(uint64 planId,
+													   bool allowRecursivePlanning,
+													   Query *originalQuery,
+													   Query *query,
+													   ParamListInfo boundParams,
+													   bool hasUnresolvedParams,
 													   PlannerRestrictionContext *
 													   plannerRestrictionContext);

 extern bool IsMergeAllowedOnRelation(Query *parse, RangeTblEntry *rte);
-extern bool ConjunctionContainsColumnFilter(Node *node,
-											Var *column,
-											Node **distributionKeyValue);
-extern bool ContainsMergeCommandWalker(Node *node);

 #endif /* DISTRIBUTED_PLANNER_H */

================================================================

@@ -353,7 +353,8 @@ extern void EnsureRelationExists(Oid relationId);
 extern bool RegularTable(Oid relationId);
 extern bool TableEmpty(Oid tableId);
 extern bool IsForeignTable(Oid relationId);
-extern bool RelationUsesIdentityColumns(TupleDesc relationDesc);
+extern bool ForeignTableDropsTableNameOption(List *optionList);
+extern bool ServerUsesPostgresFdw(Oid serverId);
 extern char * ConstructQualifiedShardName(ShardInterval *shardInterval);
 extern uint64 GetFirstShardId(Oid relationId);
 extern Datum StringToDatum(char *inputString, Oid dataType);

================================================================

@@ -99,7 +99,6 @@ extern PlannedStmt * FastPathPlanner(Query *originalQuery, Query *parse, ParamLi
 													 boundParams);
 extern bool FastPathRouterQuery(Query *query, Node **distributionKeyValue);
 extern bool JoinConditionIsOnFalse(List *relOptInfo);
-extern Oid ResultRelationOidForQuery(Query *query);

 #endif /* MULTI_ROUTER_PLANNER_H */

================================================================

@@ -17,15 +17,6 @@
 #define SINGLE_RTE_INDEX 1

-/*
- * Represents the pointer type that's being passed in the list.
- */
-typedef enum ListEntryType
-{
-	RANGETABLE_ENTRY,     /* RangeTblEntry */
-	RESTRICTION_CONTEXT   /* RelationRestriction */
-} ListEntryType;
-
 extern bool AllDistributionKeysInQueryAreEqual(Query *originalQuery,
 											   PlannerRestrictionContext *
 											   plannerRestrictionContext);

@@ -63,6 +54,4 @@ extern RelationRestrictionContext * FilterRelationRestrictionContext(
 	RelationRestrictionContext *relationRestrictionContext,
 	Relids queryRteIdentities);
-extern bool AllRelationsInListColocated(List *relationList, ListEntryType entryType);

 #endif /* RELATION_RESTRICTION_EQUIVALENCE_H */

================================================================

@ -128,6 +128,22 @@ check-isolation-custom-schedule: all $(isolation_test_files)
$(pg_regress_multi_check) --load-extension=citus --isolationtester \ $(pg_regress_multi_check) --load-extension=citus --isolationtester \
-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/$(SCHEDULE) $(EXTRA_TESTS) -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/$(SCHEDULE) $(EXTRA_TESTS)
check-custom-schedule-vg: all
$(pg_regress_multi_check) --load-extension=citus \
--valgrind --pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind --valgrind-log-file=$(CITUS_VALGRIND_LOG_FILE) \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/$(SCHEDULE) $(EXTRA_TESTS)
check-failure-custom-schedule-vg: all
$(pg_regress_multi_check) --load-extension=citus --mitmproxy \
--valgrind --pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind --valgrind-log-file=$(CITUS_VALGRIND_LOG_FILE) \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/$(SCHEDULE) $(EXTRA_TESTS)
check-isolation-custom-schedule-vg: all $(isolation_test_files)
$(pg_regress_multi_check) --load-extension=citus --isolationtester \
--valgrind --pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind --valgrind-log-file=$(CITUS_VALGRIND_LOG_FILE) \
-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/$(SCHEDULE) $(EXTRA_TESTS)
check-empty: all
$(pg_regress_multi_check) --load-extension=citus \
-- $(MULTI_REGRESS_OPTS) $(EXTRA_TESTS)


@ -125,7 +125,11 @@ def copy_test_files(config):
    with open(scheduleName) as file:
        lines = file.readlines()
    for line in lines:
        colon_index = line.index(":")
        colon_index = line.find(":")
        # skip empty lines
        if colon_index == -1:
            continue
        line = line[colon_index + 1 :].strip()
        test_names = line.split(" ")
        copy_test_files_with_names(test_names, sql_dir_path, expected_dir_path, config)


@ -38,7 +38,7 @@ CITUS_ARBITRARY_TEST_DIR = "./tmp_citus_test"
MASTER = "master" MASTER = "master"
# This should be updated when citus version changes # This should be updated when citus version changes
MASTER_VERSION = "11.2" MASTER_VERSION = "11.3"
HOME = expanduser("~") HOME = expanduser("~")


@ -17,6 +17,7 @@ args.add_argument("-p", "--path", required=False, help="Relative path for test f
args.add_argument("-r", "--repeat", help="Number of test to run", type=int, default=1) args.add_argument("-r", "--repeat", help="Number of test to run", type=int, default=1)
args.add_argument("-b", "--use-base-schedule", required=False, help="Choose base-schedules rather than minimal-schedules", action='store_true') args.add_argument("-b", "--use-base-schedule", required=False, help="Choose base-schedules rather than minimal-schedules", action='store_true')
args.add_argument("-w", "--use-whole-schedule-line", required=False, help="Use the whole line found in related schedule", action='store_true') args.add_argument("-w", "--use-whole-schedule-line", required=False, help="Use the whole line found in related schedule", action='store_true')
args.add_argument("--valgrind", required=False, help="Run the test with valgrind enabled", action='store_true')
args = vars(args.parse_args()) args = vars(args.parse_args())
@ -120,6 +121,9 @@ elif "failure" in test_schedule:
else: else:
make_recipe = 'check-custom-schedule' make_recipe = 'check-custom-schedule'
if args['valgrind']:
make_recipe += '-vg'
# prepare command to run tests # prepare command to run tests
test_command = f"make -C {regress_dir} {make_recipe} SCHEDULE='{pathlib.Path(tmp_schedule_path).stem}'" test_command = f"make -C {regress_dir} {make_recipe} SCHEDULE='{pathlib.Path(tmp_schedule_path).stem}'"


@ -7,3 +7,4 @@ test: isolation_cluster_management
test: isolation_logical_replication_single_shard_commands_on_mx
test: isolation_logical_replication_multi_shard_commands_on_mx
test: isolation_logical_replication_skip_fk_validation


@ -210,6 +210,100 @@ SELECT citus_rebalance_stop();
(1 row)
RESET ROLE;
CREATE TABLE ref_no_pk(a int);
SELECT create_reference_table('ref_no_pk');
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE ref_with_pk(a int primary key);
SELECT create_reference_table('ref_with_pk');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- Add coordinator so there's a node which doesn't have the reference tables
SELECT 1 FROM citus_add_node('localhost', :master_port, groupId=>0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)
-- fails
BEGIN;
SELECT 1 FROM citus_rebalance_start();
ERROR: cannot use logical replication to transfer shards of the relation ref_no_pk since it doesn't have a REPLICA IDENTITY or PRIMARY KEY
DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY.
HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'.
ROLLBACK;
-- success
BEGIN;
SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'force_logical');
NOTICE: Scheduled 1 moves as job xxx
DETAIL: Rebalance scheduled as background job
HINT: To monitor progress, run: SELECT * FROM citus_rebalance_status();
?column?
---------------------------------------------------------------------
1
(1 row)
ROLLBACK;
-- success
BEGIN;
SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'block_writes');
NOTICE: Scheduled 1 moves as job xxx
DETAIL: Rebalance scheduled as background job
HINT: To monitor progress, run: SELECT * FROM citus_rebalance_status();
?column?
---------------------------------------------------------------------
1
(1 row)
ROLLBACK;
-- fails
SELECT 1 FROM citus_rebalance_start();
ERROR: cannot use logical replication to transfer shards of the relation ref_no_pk since it doesn't have a REPLICA IDENTITY or PRIMARY KEY
DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY.
HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'.
-- succeeds
SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'force_logical');
NOTICE: Scheduled 1 moves as job xxx
DETAIL: Rebalance scheduled as background job
HINT: To monitor progress, run: SELECT * FROM citus_rebalance_status();
?column?
---------------------------------------------------------------------
1
(1 row)
-- wait for success
SELECT citus_rebalance_wait();
citus_rebalance_wait
---------------------------------------------------------------------
(1 row)
SELECT state, details from citus_rebalance_status();
state | details
---------------------------------------------------------------------
finished | {"tasks": [], "task_state_counts": {"done": 2}}
(1 row)
-- Remove coordinator again to allow rerunning of this test
SELECT 1 FROM citus_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
SET client_min_messages TO WARNING;
DROP SCHEMA background_rebalance CASCADE;
DROP USER non_super_user_rebalance;
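The failures above come down to ref_no_pk lacking a replica identity. A minimal sketch of the alternative the HINT implies, assuming column a can carry a primary key (hypothetical, not part of the original test):
ALTER TABLE ref_no_pk ADD PRIMARY KEY (a);
-- with a primary key in place, the default (logical) shard transfer mode should work
SELECT 1 FROM citus_rebalance_start();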


@ -497,6 +497,30 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
100
(1 row)
-- the prefer-distributed option causes the recursive planner to pass over the query twice and error out.
-- the planner recursively plans one of the distributed_table instances in its first pass. Then, in its second
-- pass, it also recursively plans the other distributed_table, since the modification made in the first pass requires it.
SET citus.local_table_join_policy TO 'prefer-distributed';
SELECT
COUNT(*)
FROM
postgres_table
JOIN
distributed_table
USING
(key)
JOIN
(SELECT key, NULL, NULL FROM distributed_table) foo
USING
(key);
DEBUG: Wrapping relation "distributed_table" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT key FROM citus_local_dist_joins.distributed_table WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((citus_local_dist_joins.postgres_table JOIN (SELECT distributed_table_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) distributed_table_1) distributed_table USING (key)) JOIN (SELECT distributed_table_1.key, NULL::text, NULL::text FROM citus_local_dist_joins.distributed_table distributed_table_1) foo(key, "?column?", "?column?_1") USING (key))
DEBUG: Wrapping relation "postgres_table" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT key FROM citus_local_dist_joins.postgres_table WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT postgres_table_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) postgres_table_1) postgres_table JOIN (SELECT distributed_table_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) distributed_table_1) distributed_table USING (key)) JOIN (SELECT distributed_table_1.key, NULL::text, NULL::text FROM citus_local_dist_joins.distributed_table distributed_table_1) foo(key, "?column?", "?column?_1") USING (key))
ERROR: recursive complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
RESET citus.local_table_join_policy;
SET client_min_messages to ERROR;
DROP TABLE citus_local;
SELECT master_remove_node('localhost', :master_port);
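For contrast, a minimal sketch of the same join under what should be the default policy ('auto'), matching the passing queries earlier in this file; assuming the tables above still exist:
SET citus.local_table_join_policy TO 'auto';
-- only one side gets wrapped into a subquery, so a single recursive pass suffices
SELECT COUNT(*) FROM postgres_table JOIN distributed_table USING (key);
RESET citus.local_table_join_policy;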


@ -217,7 +217,7 @@ ROLLBACK;
CREATE FOREIGN TABLE foreign_table (
id bigint not null,
full_name text not null default ''
) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true');
) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true', table_name 'foreign_table');
-- observe that we do not create fdw server for shell table, both shard relation
-- & shell relation point to the same server object
-- Disable metadata sync since citus doesn't support distributing


@ -1,24 +1,50 @@
--
-- Test the CREATE statements related to columnar.
--
-- We cannot create the tables below within columnar_create because columnar_create
-- is dropped at the end of this test, but unfortunately some other tests depend
-- on those tables too.
--
-- However, this file has to be runnable multiple times for flaky test detection;
-- so we create them below --outside columnar_create-- idempotently.
DO
$$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_class
WHERE relname = 'contestant' AND
relnamespace = (
SELECT oid FROM pg_namespace WHERE nspname = 'public'
)
)
THEN
-- Create uncompressed table
CREATE TABLE contestant (handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
USING columnar;
ALTER TABLE contestant SET (columnar.compression = none);
CREATE INDEX contestant_idx on contestant(handle);
-- Create zstd compressed table
CREATE TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
USING columnar;
-- Test that querying an empty table works
ANALYZE contestant;
END IF;
END
$$
LANGUAGE plpgsql;
SELECT count(*) FROM contestant;
count
---------------------------------------------------------------------
0
(1 row)
CREATE SCHEMA columnar_create;
SET search_path TO columnar_create;
-- Should fail: unlogged tables not supported
CREATE UNLOGGED TABLE columnar_unlogged(i int) USING columnar;
ERROR: unlogged columnar tables are not supported
@ -55,6 +81,89 @@ SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_
t
(1 row)
BEGIN;
INSERT INTO columnar_table_1 VALUES (2);
ROLLBACK;
INSERT INTO columnar_table_1 VALUES (3),(4);
INSERT INTO columnar_table_1 VALUES (5),(6);
INSERT INTO columnar_table_1 VALUES (7),(8);
-- Test whether columnar metadata accessors are still fine even
-- when the metadata indexes are not available to them.
BEGIN;
ALTER INDEX columnar_internal.stripe_first_row_number_idx RENAME TO new_index_name;
ALTER INDEX columnar_internal.chunk_pkey RENAME TO new_index_name_1;
ALTER INDEX columnar_internal.stripe_pkey RENAME TO new_index_name_2;
ALTER INDEX columnar_internal.chunk_group_pkey RENAME TO new_index_name_3;
CREATE INDEX columnar_table_1_idx ON columnar_table_1(a);
WARNING: Metadata index stripe_first_row_number_idx is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
WARNING: Metadata index stripe_first_row_number_idx is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
WARNING: Metadata index chunk_pkey is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
WARNING: Metadata index chunk_group_pkey is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
-- make sure that we test index scan
SET LOCAL columnar.enable_custom_scan TO 'off';
SET LOCAL enable_seqscan TO off;
SET LOCAL seq_page_cost TO 10000000;
SELECT * FROM columnar_table_1 WHERE a = 6;
WARNING: Metadata index stripe_first_row_number_idx is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
a
---------------------------------------------------------------------
6
(1 row)
SELECT * FROM columnar_table_1 WHERE a = 5;
a
---------------------------------------------------------------------
5
(1 row)
SELECT * FROM columnar_table_1 WHERE a = 7;
a
---------------------------------------------------------------------
7
(1 row)
SELECT * FROM columnar_table_1 WHERE a = 3;
a
---------------------------------------------------------------------
3
(1 row)
DROP INDEX columnar_table_1_idx;
-- Re-shuffle some metadata records to test whether we can
-- rely on sequential metadata scan when the metadata records
-- are not ordered by their "first_row_number"s.
WITH cte AS (
DELETE FROM columnar_internal.stripe
WHERE storage_id = columnar.get_storage_id('columnar_table_1')
RETURNING *
)
INSERT INTO columnar_internal.stripe SELECT * FROM cte ORDER BY first_row_number DESC;
SELECT SUM(a) FROM columnar_table_1;
sum
---------------------------------------------------------------------
34
(1 row)
SELECT * FROM columnar_table_1 WHERE a = 6;
a
---------------------------------------------------------------------
6
(1 row)
-- Run a SELECT query after the INSERT command to force flushing the
-- data within the xact block.
INSERT INTO columnar_table_1 VALUES (20);
SELECT COUNT(*) FROM columnar_table_1;
WARNING: Metadata index stripe_pkey is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
count
---------------------------------------------------------------------
8
(1 row)
DROP TABLE columnar_table_1 CASCADE;
NOTICE: drop cascades to materialized view columnar_table_1_mv
WARNING: Metadata index on a columnar metadata table is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
ROLLBACK;
-- test dropping columnar table
DROP TABLE columnar_table_1 CASCADE;
NOTICE: drop cascades to materialized view columnar_table_1_mv
@ -73,6 +182,7 @@ SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id
FROM pg_class WHERE relname='columnar_temp' \gset
SELECT pg_backend_pid() AS val INTO old_backend_pid;
\c - - - :master_port
SET search_path TO columnar_create;
-- wait until the old backend expires to make sure that temp table cleanup is complete
SELECT columnar_test_helpers.pg_waitpid(val) FROM old_backend_pid;
pg_waitpid
@ -182,3 +292,5 @@ SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_temp_sto
-- make sure citus_columnar can be loaded
LOAD 'citus_columnar';
SET client_min_messages TO WARNING;
DROP SCHEMA columnar_create CASCADE;
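A small companion sketch for the re-shuffle above, reading the stripe metadata it manipulates; it assumes it runs before the DROP statements and uses only objects that already appear in this file:
-- list stripes in logical order; after the re-shuffle the underlying
-- metadata rows were re-inserted in descending first_row_number order
SELECT first_row_number
FROM columnar_internal.stripe
WHERE storage_id = columnar.get_storage_id('columnar_table_1')
ORDER BY first_row_number;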


@ -257,6 +257,32 @@ SELECT SUM(a)=48000 FROM columnar_table WHERE a = 16000 OR a = 32000;
t
(1 row)
BEGIN;
ALTER INDEX columnar_internal.stripe_first_row_number_idx RENAME TO new_index_name;
ALTER INDEX columnar_internal.chunk_pkey RENAME TO new_index_name_1;
-- same queries but this time some metadata indexes are not available
SELECT SUM(a)=312487500 FROM columnar_table WHERE a < 25000;
WARNING: Metadata index stripe_first_row_number_idx is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
WARNING: Metadata index stripe_first_row_number_idx is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
WARNING: Metadata index chunk_pkey is not available, this might mean slower read/writes on columnar tables. This is expected during Postgres upgrades and not expected otherwise.
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT SUM(a)=167000 FROM columnar_table WHERE a = 16000 OR a = 151000;
?column?
---------------------------------------------------------------------
t
(1 row)
SELECT SUM(a)=48000 FROM columnar_table WHERE a = 16000 OR a = 32000;
?column?
---------------------------------------------------------------------
t
(1 row)
ROLLBACK;
TRUNCATE columnar_table;
ALTER TABLE columnar_table DROP CONSTRAINT columnar_table_pkey;
-- hash --


@ -609,5 +609,18 @@ SELECT * FROM reference_table ORDER BY 1;
(5)
(5 rows)
-- failing UPDATE on a reference table with a subquery in RETURNING clause that needs to be pushed-down.
-- the error message is not great, but at least we no longer see crashes.
CREATE TABLE ref (a int);
SELECT create_reference_table('ref');
create_reference_table
---------------------------------------------------------------------
(1 row)
UPDATE ref SET a = 1 RETURNING
(SELECT pg_catalog.max(latest_end_time) FROM pg_catalog.pg_stat_wal_receiver)
as c3;
ERROR: a column definition list is required for functions returning "record"
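For comparison, a RETURNING clause that needs no pushdown still works; a trivial sketch (hypothetical):
UPDATE ref SET a = 1 RETURNING a;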
SET client_min_messages TO ERROR;
DROP SCHEMA coordinator_evaluation CASCADE;


@ -0,0 +1,43 @@
-- coordinator
CREATE SCHEMA drop_database;
SET search_path TO drop_database;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 35137400;
CREATE DATABASE citus_created;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
\c citus_created
CREATE EXTENSION citus;
CREATE DATABASE citus_not_created;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
\c citus_not_created
DROP DATABASE citus_created;
\c regression
DROP DATABASE citus_not_created;
-- worker1
\c - - - :worker_1_port
SET search_path TO drop_database;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 35137400;
CREATE DATABASE citus_created;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
\c citus_created
CREATE EXTENSION citus;
CREATE DATABASE citus_not_created;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
\c citus_not_created
DROP DATABASE citus_created;
\c regression
DROP DATABASE citus_not_created;
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA drop_database CASCADE;


@ -4,6 +4,15 @@ SET citus.shard_replication_factor TO 1;
SET citus.enable_local_execution TO ON;
CREATE SCHEMA foreign_tables_schema_mx;
SET search_path TO foreign_tables_schema_mx;
SET client_min_messages to ERROR;
-- ensure that coordinator is added to pg_dist_node
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
-- test adding foreign table to metadata with the guc
SET citus.use_citus_managed_tables TO ON;
CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial);
@ -379,15 +388,93 @@ SELECT * FROM ref_tbl d JOIN foreign_table_local f ON d.a=f.id ORDER BY f.id;
\c - - - :master_port
SET search_path TO foreign_tables_schema_mx;
-- should error out because it doesn't have a table_name field
CREATE FOREIGN TABLE foreign_table_local_fails (
id integer NOT NULL,
data text
)
SERVER foreign_server_local
OPTIONS (schema_name 'foreign_tables_schema_mx');
-- should error out because it doesn't have a table_name field
SELECT citus_add_local_table_to_metadata('foreign_table_local_fails');
ERROR: table_name option must be provided when using postgres_fdw with Citus
-- should work since it has a table_name
ALTER FOREIGN TABLE foreign_table_local_fails OPTIONS (table_name 'foreign_table_test');
SELECT citus_add_local_table_to_metadata('foreign_table_local_fails');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
INSERT INTO foreign_table_test VALUES (1, 'test');
SELECT undistribute_table('foreign_table_local_fails');
NOTICE: creating a new table for foreign_tables_schema_mx.foreign_table_local_fails
NOTICE: dropping the old foreign_tables_schema_mx.foreign_table_local_fails
NOTICE: renaming the new table to foreign_tables_schema_mx.foreign_table_local_fails
undistribute_table
---------------------------------------------------------------------
(1 row)
DROP FOREIGN TABLE foreign_table_local; DROP FOREIGN TABLE foreign_table_local;
-- disallow dropping table_name when foreign table is in metadata
CREATE TABLE table_name_drop(id int);
CREATE FOREIGN TABLE foreign_table_name_drop_fails (
id INT
)
SERVER foreign_server_local
OPTIONS (schema_name 'foreign_tables_schema_mx', table_name 'table_name_drop');
SELECT citus_add_local_table_to_metadata('foreign_table_name_drop_fails');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
-- table_name option is already added
ALTER FOREIGN TABLE foreign_table_name_drop_fails OPTIONS (ADD table_name 'table_name_drop');
ERROR: option "table_name" provided more than once
-- throw error if user tries to drop table_name option from a foreign table inside metadata
ALTER FOREIGN TABLE foreign_table_name_drop_fails OPTIONS (DROP table_name);
ERROR: alter foreign table alter options (drop table_name) command is not allowed for Citus tables
-- case sensitive option name
ALTER FOREIGN TABLE foreign_table_name_drop_fails OPTIONS (DROP Table_Name);
ERROR: alter foreign table alter options (drop table_name) command is not allowed for Citus tables
-- other options are allowed to drop
ALTER FOREIGN TABLE foreign_table_name_drop_fails OPTIONS (DROP schema_name);
CREATE FOREIGN TABLE foreign_table_name_drop (
id INT
)
SERVER foreign_server_local
OPTIONS (schema_name 'foreign_tables_schema_mx', table_name 'table_name_drop');
-- user can drop the table_name option if the foreign table is not in metadata
ALTER FOREIGN TABLE foreign_table_name_drop OPTIONS (DROP table_name);
-- we should not intercept data wrappers other than postgres_fdw
CREATE EXTENSION file_fdw;
-- remove validator method to add table_name option; otherwise, table_name option is not allowed
SELECT result FROM run_command_on_all_nodes('ALTER FOREIGN DATA WRAPPER file_fdw NO VALIDATOR');
result
---------------------------------------------------------------------
ALTER FOREIGN DATA WRAPPER
ALTER FOREIGN DATA WRAPPER
ALTER FOREIGN DATA WRAPPER
(3 rows)
CREATE SERVER citustest FOREIGN DATA WRAPPER file_fdw;
\copy (select i from generate_series(0,100)i) to '/tmp/test_file_fdw.data';
CREATE FOREIGN TABLE citustest_filefdw (
data text
)
SERVER citustest
OPTIONS ( filename '/tmp/test_file_fdw.data');
-- add non-postgres_fdw table into metadata even if it does not have table_name option
SELECT citus_add_local_table_to_metadata('citustest_filefdw');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
ALTER FOREIGN TABLE citustest_filefdw OPTIONS (ADD table_name 'unused_table_name_option');
-- drop table_name option of non-postgres_fdw table even if it is inside metadata
ALTER FOREIGN TABLE citustest_filefdw OPTIONS (DROP table_name);
-- cleanup at exit
set client_min_messages to error;
DROP SCHEMA foreign_tables_schema_mx CASCADE;
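Taken together, the rule these tests pin down: a postgres_fdw table must carry a table_name option before it can enter (and while it stays in) Citus metadata. A minimal sketch with hypothetical names (ft_sketch, some_local_table):
CREATE FOREIGN TABLE ft_sketch (id int)
    SERVER foreign_server_local
    OPTIONS (schema_name 'foreign_tables_schema_mx', table_name 'some_local_table');
SELECT citus_add_local_table_to_metadata('ft_sketch');
-- OPTIONS (DROP table_name) would now error out, as shown above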


@ -0,0 +1,52 @@
Parsed test spec with 3 sessions
starting permutation: s1-start-session-level-connection s3-acquire-advisory-lock s2-move-placement s1-start-session-level-connection s1-insert-violation-into-shard s3-release-advisory-lock
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s2-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s1-insert-violation-into-shard:
SELECT run_commands_on_session_level_connection_to_node(format('INSERT INTO t1_%s VALUES (-1, -1)', (SELECT * FROM selected_shard_for_test_table)));
run_commands_on_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-move-placement: <... completed>
master_move_shard_placement
---------------------------------------------------------------------
(1 row)


@ -774,7 +774,7 @@ id|value
(0 rows)
starting permutation: s1-load-cache s1-start-connection s1-lock-to-split-shard s2-print-locks s2-non-blocking-shard-split s2-print-locks s2-show-pg_dist_cleanup s1-stop-connection
starting permutation: s1-load-cache s1-start-connection s1-lock-to-split-shard s2-print-locks s2-non-blocking-shard-split s2-print-locks s2-show-pg_dist_cleanup-shards s1-stop-connection
create_distributed_table
---------------------------------------------------------------------
@ -842,8 +842,9 @@ node_name|node_port|success|result
localhost| 57637|t |to_split_table_1500001-relation-AccessShareLock
(1 row)
step s2-show-pg_dist_cleanup:
step s2-show-pg_dist_cleanup-shards:
SELECT object_name, object_type, policy_type FROM pg_dist_cleanup;
SELECT object_name, object_type, policy_type FROM pg_dist_cleanup
WHERE object_type = 1;
object_name |object_type|policy_type
---------------------------------------------------------------------
@ -859,7 +860,7 @@ stop_session_level_connection_to_node
(1 row)
starting permutation: s1-start-connection s1-lock-to-split-shard s2-print-locks s2-non-blocking-shard-split s2-print-cluster s2-show-pg_dist_cleanup s1-stop-connection
starting permutation: s1-start-connection s1-lock-to-split-shard s2-print-locks s2-non-blocking-shard-split s2-print-cluster s2-show-pg_dist_cleanup-shards s1-stop-connection
create_distributed_table
---------------------------------------------------------------------
@ -929,8 +930,9 @@ id|value
---------------------------------------------------------------------
(0 rows)
step s2-show-pg_dist_cleanup:
step s2-show-pg_dist_cleanup-shards:
SELECT object_name, object_type, policy_type FROM pg_dist_cleanup;
SELECT object_name, object_type, policy_type FROM pg_dist_cleanup
WHERE object_type = 1;
object_name |object_type|policy_type
---------------------------------------------------------------------


@ -0,0 +1,30 @@
CREATE SCHEMA issue_6543;
SET search_path TO issue_6543;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 67322500;
CREATE TABLE event (
tenant_id varchar,
id bigint,
primary key (tenant_id, id)
);
CREATE TABLE page (
tenant_id varchar,
id int,
primary key (tenant_id, id)
);
SELECT create_distributed_table('event', 'tenant_id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('page', 'tenant_id', colocate_with => 'event');
create_distributed_table
---------------------------------------------------------------------
(1 row)
alter table page add constraint fk21 foreign key (tenant_id, id) references event;
SET client_min_messages TO WARNING;
DROP SCHEMA issue_6543 CASCADE;


@ -799,23 +799,6 @@ SELECT distributed.name, distributed.name, local.title, local.title FROM local
0 | 0 | 0 | 0
(1 row)
SELECT
COUNT(*)
FROM
local
JOIN
distributed
USING
(id)
JOIN
(SELECT id, NULL, NULL FROM distributed) foo
USING
(id);
count
---------------------------------------------------------------------
101
(1 row)
BEGIN;
SELECT COUNT(DISTINCT title) FROM local;
count


@ -1463,9 +1463,8 @@ SELECT COUNT(*) FROM distributed_table_pkey JOIN postgres_table using(key)
WHERE distributed_table_pkey.key IN (SELECT key FROM distributed_table_pkey WHERE key = 5);
DEBUG: Wrapping relation "distributed_table_pkey" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT key FROM local_table_join.distributed_table_pkey WHERE (key OPERATOR(pg_catalog.=) 5)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT distributed_table_pkey_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) distributed_table_pkey_1) distributed_table_pkey JOIN local_table_join.postgres_table USING (key)) WHERE (distributed_table_pkey.key OPERATOR(pg_catalog.=) ANY (SELECT distributed_table_pkey_1.key FROM local_table_join.distributed_table_pkey distributed_table_pkey_1 WHERE (distributed_table_pkey_1.key OPERATOR(pg_catalog.=) 5)))
DEBUG: generating subplan XXX_2 for subquery SELECT key FROM local_table_join.distributed_table_pkey WHERE (key OPERATOR(pg_catalog.=) 5)
DEBUG: generating subplan XXX_1 for subquery SELECT key FROM local_table_join.distributed_table_pkey WHERE (key OPERATOR(pg_catalog.=) 5)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT distributed_table_pkey_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) distributed_table_pkey_1) distributed_table_pkey JOIN local_table_join.postgres_table USING (key)) WHERE (distributed_table_pkey.key OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT distributed_table_pkey_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) distributed_table_pkey_1) distributed_table_pkey JOIN local_table_join.postgres_table USING (key)) WHERE (distributed_table_pkey.key OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)))
count
---------------------------------------------------------------------
100
@ -1475,9 +1474,8 @@ SELECT COUNT(*) FROM distributed_table_pkey JOIN postgres_table using(key)
WHERE distributed_table_pkey.key IN (SELECT key FROM distributed_table_pkey WHERE key = 5) AND distributed_table_pkey.key = 5;
DEBUG: Wrapping relation "distributed_table_pkey" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT key FROM local_table_join.distributed_table_pkey WHERE (key OPERATOR(pg_catalog.=) 5)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT distributed_table_pkey_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) distributed_table_pkey_1) distributed_table_pkey JOIN local_table_join.postgres_table USING (key)) WHERE ((distributed_table_pkey.key OPERATOR(pg_catalog.=) ANY (SELECT distributed_table_pkey_1.key FROM local_table_join.distributed_table_pkey distributed_table_pkey_1 WHERE (distributed_table_pkey_1.key OPERATOR(pg_catalog.=) 5))) AND (distributed_table_pkey.key OPERATOR(pg_catalog.=) 5))
DEBUG: generating subplan XXX_2 for subquery SELECT key FROM local_table_join.distributed_table_pkey WHERE (key OPERATOR(pg_catalog.=) 5)
DEBUG: generating subplan XXX_1 for subquery SELECT key FROM local_table_join.distributed_table_pkey WHERE (key OPERATOR(pg_catalog.=) 5)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT distributed_table_pkey_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) distributed_table_pkey_1) distributed_table_pkey JOIN local_table_join.postgres_table USING (key)) WHERE ((distributed_table_pkey.key OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer))) AND (distributed_table_pkey.key OPERATOR(pg_catalog.=) 5))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT distributed_table_pkey_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) distributed_table_pkey_1) distributed_table_pkey JOIN local_table_join.postgres_table USING (key)) WHERE ((distributed_table_pkey.key OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer))) AND (distributed_table_pkey.key OPERATOR(pg_catalog.=) 5))
count
---------------------------------------------------------------------
100


@ -18,7 +18,6 @@ SET search_path TO merge_schema;
SET citus.shard_count TO 4;
SET citus.next_shard_id TO 4000000;
SET citus.explain_all_tasks to true;
SET citus.shard_replication_factor TO 1;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
@ -215,18 +214,9 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
(1 row)
-- Updates one of the row with customer_id = 30002
SELECT * from target t WHERE t.customer_id = 30002;
customer_id | last_order_id | order_center | order_count | last_order
---------------------------------------------------------------------
30002 | 103 | AX | -1 | Sun Jan 17 19:53:00 2021
(1 row)
-- Turn on notice to print tasks sent to nodes (it should be a single task)
SET citus.log_remote_commands to true;
MERGE INTO target t
USING source s
ON (t.customer_id = s.customer_id) AND t.customer_id = 30002
ON (t.customer_id = s.customer_id)
WHEN MATCHED AND t.order_center = 'XX' THEN
DELETE
WHEN MATCHED THEN
@ -236,39 +226,7 @@ MERGE INTO target t
WHEN NOT MATCHED THEN -- New entry, record it.
INSERT (customer_id, last_order_id, order_center, order_count, last_order)
VALUES (customer_id, s.order_id, s.order_center, 123, s.order_time);
NOTICE: issuing MERGE INTO merge_schema.target_4000002 t USING merge_schema.source_4000006 s ON ((t.customer_id OPERATOR(pg_catalog.=) s.customer_id) AND (t.customer_id OPERATOR(pg_catalog.=) 30002)) WHEN MATCHED AND ((t.order_center COLLATE "default") OPERATOR(pg_catalog.=) 'XX'::text) THEN DELETE WHEN MATCHED THEN UPDATE SET last_order_id = s.order_id, order_count = (t.order_count OPERATOR(pg_catalog.+) 1) WHEN NOT MATCHED THEN INSERT (customer_id, last_order_id, order_center, order_count, last_order) VALUES (s.customer_id, s.order_id, s.order_center, 123, s.order_time)
ERROR: MERGE command is not supported on distributed/reference tables yet
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands to false;
SELECT * from target t WHERE t.customer_id = 30002;
customer_id | last_order_id | order_center | order_count | last_order
---------------------------------------------------------------------
30002 | 103 | AX | 0 | Sun Jan 17 19:53:00 2021
(1 row)
-- Deletes one of the row with customer_id = 30004
SELECT * from target t WHERE t.customer_id = 30004;
customer_id | last_order_id | order_center | order_count | last_order
---------------------------------------------------------------------
30004 | 99 | XX | -1 | Fri Sep 11 03:23:00 2020
(1 row)
MERGE INTO target t
USING source s
ON (t.customer_id = s.customer_id) AND t.customer_id = 30004
WHEN MATCHED AND t.order_center = 'XX' THEN
DELETE
WHEN MATCHED THEN
UPDATE SET -- Existing customer, update the order count and last_order_id
order_count = t.order_count + 1,
last_order_id = s.order_id
WHEN NOT MATCHED THEN -- New entry, record it.
INSERT (customer_id, last_order_id, order_center, order_count, last_order)
VALUES (customer_id, s.order_id, s.order_center, 123, s.order_time);
SELECT * from target t WHERE t.customer_id = 30004;
customer_id | last_order_id | order_center | order_count | last_order
---------------------------------------------------------------------
(0 rows)
--
-- Test MERGE with CTE as source
--
@ -428,39 +386,18 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
(1 row)
SELECT * FROM t1 order by id;
id | val
---------------------------------------------------------------------
1 | 0
2 | 0
5 | 0
(3 rows)
SET citus.log_remote_commands to true;
WITH s1_res AS (
SELECT * FROM s1
)
MERGE INTO t1
USING s1_res ON (s1_res.id = t1.id) AND t1.id = 6
USING s1_res ON (s1_res.id = t1.id)
WHEN MATCHED AND s1_res.val = 0 THEN
DELETE
WHEN MATCHED THEN
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1_res.id, s1_res.val);
NOTICE: issuing WITH s1_res AS (SELECT s1.id, s1.val FROM merge_schema.s1_4000018 s1) MERGE INTO merge_schema.t1_4000014 t1 USING s1_res ON ((s1_res.id OPERATOR(pg_catalog.=) t1.id) AND (t1.id OPERATOR(pg_catalog.=) 6)) WHEN MATCHED AND (s1_res.val OPERATOR(pg_catalog.=) 0) THEN DELETE WHEN MATCHED THEN UPDATE SET val = (t1.val OPERATOR(pg_catalog.+) 1) WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s1_res.id, s1_res.val)
ERROR: MERGE command is not supported on distributed/reference tables yet
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands to false;
-- As the id 6 is NO match, VALUES(6, 1) should appear in target
SELECT * FROM t1 order by id;
id | val
---------------------------------------------------------------------
1 | 0
2 | 0
5 | 0
6 | 1
(4 rows)
--
-- Test with multiple join conditions
--
@ -616,39 +553,16 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
(1 row)
SELECT * FROM t2 ORDER BY 1;
id | val | src
---------------------------------------------------------------------
1 | 0 | target
2 | 0 | target
3 | 1 | match
4 | 0 | match
(4 rows)
SET citus.log_remote_commands to true;
MERGE INTO t2
USING s2
ON t2.id = s2.id AND t2.src = s2.src AND t2.id = 4
ON t2.id = s2.id AND t2.src = s2.src
WHEN MATCHED AND t2.val = 1 THEN
UPDATE SET val = s2.val + 10
WHEN MATCHED THEN
DELETE
WHEN NOT MATCHED THEN
INSERT (id, val, src) VALUES (s2.id, s2.val, s2.src);
NOTICE: issuing MERGE INTO merge_schema.t2_4000023 t2 USING merge_schema.s2_4000027 s2 ON ((t2.id OPERATOR(pg_catalog.=) s2.id) AND (t2.src OPERATOR(pg_catalog.=) s2.src) AND (t2.id OPERATOR(pg_catalog.=) 4)) WHEN MATCHED AND (t2.val OPERATOR(pg_catalog.=) 1) THEN UPDATE SET val = (s2.val OPERATOR(pg_catalog.+) 10) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT (id, val, src) VALUES (s2.id, s2.val, s2.src)
ERROR: MERGE command is not supported on distributed/reference tables yet
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands to false;
-- Row with id = 4 is a match for delete clause, row should be deleted
-- Row with id = 3 is a NO match, row from source will be inserted
SELECT * FROM t2 ORDER BY 1;
id | val | src
---------------------------------------------------------------------
1 | 0 | target
2 | 0 | target
3 | 1 | match
3 | 10 | match
(4 rows)
--
-- With sub-query as the MERGE source
--
@ -1299,261 +1213,9 @@ SELECT * FROM ft_target;
3 | source
(2 rows)
--
-- complex joins on the source side
--
-- source(join of two relations) relation is an unaliased join
CREATE TABLE target_cj(tid int, src text, val int);
CREATE TABLE source_cj1(sid1 int, src1 text, val1 int);
CREATE TABLE source_cj2(sid2 int, src2 text, val2 int);
INSERT INTO target_cj VALUES (1, 'target', 0);
INSERT INTO target_cj VALUES (2, 'target', 0);
INSERT INTO target_cj VALUES (2, 'target', 0);
INSERT INTO target_cj VALUES (3, 'target', 0);
INSERT INTO source_cj1 VALUES (2, 'source-1', 10);
INSERT INTO source_cj2 VALUES (2, 'source-2', 20);
BEGIN;
MERGE INTO target_cj t
USING source_cj1 s1 INNER JOIN source_cj2 s2 ON sid1 = sid2
ON t.tid = sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET src = src2
WHEN NOT MATCHED THEN
DO NOTHING;
-- Gold result to compare against
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | source-2 | 0
2 | source-2 | 0
3 | target | 0
(4 rows)
ROLLBACK;
BEGIN;
-- try accessing columns from either side of the source join
MERGE INTO target_cj t
USING source_cj1 s2
INNER JOIN source_cj2 s1 ON sid1 = sid2 AND val1 = 10
ON t.tid = sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET tid = sid2, src = src1, val = val2
WHEN NOT MATCHED THEN
DO NOTHING;
-- Gold result to compare against
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | source-1 | 20
2 | source-1 | 20
3 | target | 0
(4 rows)
ROLLBACK;
-- Test the same scenarios with distributed tables
SELECT create_distributed_table('target_cj', 'tid');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$merge_schema.target_cj$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('source_cj1', 'sid1');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$merge_schema.source_cj1$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('source_cj2', 'sid2');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$merge_schema.source_cj2$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
BEGIN;
SET citus.log_remote_commands to true;
MERGE INTO target_cj t
USING source_cj1 s1 INNER JOIN source_cj2 s2 ON sid1 = sid2
ON t.tid = sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET src = src2
WHEN NOT MATCHED THEN
DO NOTHING;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing MERGE INTO merge_schema.target_cj_4000050 t USING (merge_schema.source_cj1_4000054 s1 JOIN merge_schema.source_cj2_4000058 s2 ON ((s1.sid1 OPERATOR(pg_catalog.=) s2.sid2))) ON ((t.tid OPERATOR(pg_catalog.=) s1.sid1) AND (t.tid OPERATOR(pg_catalog.=) 2)) WHEN MATCHED THEN UPDATE SET src = s2.src2 WHEN NOT MATCHED THEN DO NOTHING
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands to false;
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | source-2 | 0
2 | source-2 | 0
3 | target | 0
(4 rows)
ROLLBACK;
BEGIN;
-- try accessing columns from either side of the source join
MERGE INTO target_cj t
USING source_cj1 s2
INNER JOIN source_cj2 s1 ON sid1 = sid2 AND val1 = 10
ON t.tid = sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET src = src1, val = val2
WHEN NOT MATCHED THEN
DO NOTHING;
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | source-1 | 20
2 | source-1 | 20
3 | target | 0
(4 rows)
ROLLBACK;
-- sub-query as a source
BEGIN;
MERGE INTO target_cj t
USING (SELECT * FROM source_cj1 WHERE sid1 = 2) sub
ON t.tid = sub.sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET src = sub.src1, val = val1
WHEN NOT MATCHED THEN
DO NOTHING;
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | source-1 | 10
2 | source-1 | 10
3 | target | 0
(4 rows)
ROLLBACK;
-- Test self-join
BEGIN;
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | target | 0
2 | target | 0
3 | target | 0
(4 rows)
set citus.log_remote_commands to true;
MERGE INTO target_cj t1
USING (SELECT * FROM target_cj) sub
ON t1.tid = sub.tid AND t1.tid = 3
WHEN MATCHED THEN
UPDATE SET src = sub.src, val = sub.val + 100
WHEN NOT MATCHED THEN
DO NOTHING;
NOTICE: issuing MERGE INTO merge_schema.target_cj_4000048 t1 USING (SELECT target_cj.tid, target_cj.src, target_cj.val FROM merge_schema.target_cj_4000048 target_cj) sub ON ((t1.tid OPERATOR(pg_catalog.=) sub.tid) AND (t1.tid OPERATOR(pg_catalog.=) 3)) WHEN MATCHED THEN UPDATE SET src = sub.src, val = (sub.val OPERATOR(pg_catalog.+) 100) WHEN NOT MATCHED THEN DO NOTHING
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
set citus.log_remote_commands to false;
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | target | 0
2 | target | 0
3 | target | 100
(4 rows)
ROLLBACK;
-- Test PREPARE
PREPARE foo(int) AS
MERGE INTO target_cj target
USING (SELECT * FROM source_cj1) sub
ON target.tid = sub.sid1 AND target.tid = $1
WHEN MATCHED THEN
UPDATE SET val = sub.val1
WHEN NOT MATCHED THEN
DO NOTHING;
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | target | 0
2 | target | 0
3 | target | 0
(4 rows)
BEGIN;
EXECUTE foo(2);
EXECUTE foo(2);
EXECUTE foo(2);
EXECUTE foo(2);
EXECUTE foo(2);
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | target | 10
2 | target | 10
3 | target | 0
(4 rows)
ROLLBACK;
BEGIN;
SET citus.log_remote_commands to true;
SET client_min_messages TO DEBUG1;
EXECUTE foo(2);
DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_cj_4000047 target USING (SELECT source_cj1.sid1, source_cj1.src1, source_cj1.val1 FROM merge_schema.source_cj1_4000051 source_cj1) sub ON ((target.tid OPERATOR(pg_catalog.=) sub.sid1) AND (target.tid OPERATOR(pg_catalog.=) $1)) WHEN MATCHED THEN UPDATE SET val = sub.val1 WHEN NOT MATCHED THEN DO NOTHING >
DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_cj_4000048 target USING (SELECT source_cj1.sid1, source_cj1.src1, source_cj1.val1 FROM merge_schema.source_cj1_4000052 source_cj1) sub ON ((target.tid OPERATOR(pg_catalog.=) sub.sid1) AND (target.tid OPERATOR(pg_catalog.=) $1)) WHEN MATCHED THEN UPDATE SET val = sub.val1 WHEN NOT MATCHED THEN DO NOTHING >
DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_cj_4000049 target USING (SELECT source_cj1.sid1, source_cj1.src1, source_cj1.val1 FROM merge_schema.source_cj1_4000053 source_cj1) sub ON ((target.tid OPERATOR(pg_catalog.=) sub.sid1) AND (target.tid OPERATOR(pg_catalog.=) $1)) WHEN MATCHED THEN UPDATE SET val = sub.val1 WHEN NOT MATCHED THEN DO NOTHING >
DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_cj_4000050 target USING (SELECT source_cj1.sid1, source_cj1.src1, source_cj1.val1 FROM merge_schema.source_cj1_4000054 source_cj1) sub ON ((target.tid OPERATOR(pg_catalog.=) sub.sid1) AND (target.tid OPERATOR(pg_catalog.=) $1)) WHEN MATCHED THEN UPDATE SET val = sub.val1 WHEN NOT MATCHED THEN DO NOTHING >
DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_cj_4000050 target USING (SELECT source_cj1.sid1, source_cj1.src1, source_cj1.val1 FROM merge_schema.source_cj1_4000054 source_cj1) sub ON ((target.tid OPERATOR(pg_catalog.=) sub.sid1) AND (target.tid OPERATOR(pg_catalog.=) $1)) WHEN MATCHED THEN UPDATE SET val = sub.val1 WHEN NOT MATCHED THEN DO NOTHING >
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing MERGE INTO merge_schema.target_cj_4000050 target USING (SELECT source_cj1.sid1, source_cj1.src1, source_cj1.val1 FROM merge_schema.source_cj1_4000054 source_cj1) sub ON ((target.tid OPERATOR(pg_catalog.=) sub.sid1) AND (target.tid OPERATOR(pg_catalog.=) $1)) WHEN MATCHED THEN UPDATE SET val = sub.val1 WHEN NOT MATCHED THEN DO NOTHING
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
RESET client_min_messages;
EXECUTE foo(2);
NOTICE: issuing MERGE INTO merge_schema.target_cj_4000050 target USING (SELECT source_cj1.sid1, source_cj1.src1, source_cj1.val1 FROM merge_schema.source_cj1_4000054 source_cj1) sub ON ((target.tid OPERATOR(pg_catalog.=) sub.sid1) AND (target.tid OPERATOR(pg_catalog.=) $1)) WHEN MATCHED THEN UPDATE SET val = sub.val1 WHEN NOT MATCHED THEN DO NOTHING
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands to false;
SELECT * FROM target_cj ORDER BY 1;
tid | src | val
---------------------------------------------------------------------
1 | target | 0
2 | target | 10
2 | target | 10
3 | target | 0
(4 rows)
ROLLBACK;
--
-- Error and Unsupported scenarios
--
-- try updating the distribution key column
BEGIN;
MERGE INTO target_cj t
USING source_cj1 s
ON t.tid = s.sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET tid = tid + 9, src = src || ' updated by merge'
WHEN NOT MATCHED THEN
INSERT VALUES (sid1, 'inserted by merge', val1);
ERROR: modifying the partition value of rows is not allowed
ROLLBACK;
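-- Illustrative sketch, not part of the original test: since MERGE cannot
-- modify the distribution key, a hypothetical workaround is to delete the
-- matched row and re-insert it with the new key, e.g.:
--   MERGE INTO target_cj t
--   USING source_cj1 s
--   ON t.tid = s.sid1 AND t.tid = 2
--   WHEN MATCHED THEN DELETE;
--   INSERT INTO target_cj
--   SELECT sid1 + 9, src1 || ' updated by merge', val1
--   FROM source_cj1 WHERE sid1 = 2;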
-- Foreign table as target
MERGE INTO foreign_table
USING ft_target ON (foreign_table.id = ft_target.id)
@@ -1612,54 +1274,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1.id, s1.val);
ERROR: MERGE command is not supported with combination of distributed/local tables yet
ERROR: MERGE command is not supported on distributed/reference tables yet
-- Now both s1 and t1 are distributed tables
SELECT undistribute_table('t1');
NOTICE: creating a new table for merge_schema.t1
NOTICE: moving the data of merge_schema.t1
NOTICE: dropping the old merge_schema.t1
NOTICE: renaming the new table to merge_schema.t1
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('t1', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$merge_schema.t1$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- We have a potential pitfall where a function invoked in
-- the MERGE conditions can insert/update rows on an arbitrary shard
CREATE OR REPLACE function merge_when_and_write() RETURNS BOOLEAN
LANGUAGE PLPGSQL AS
$$
BEGIN
INSERT INTO t1 VALUES (100, 100);
RETURN TRUE;
END;
$$;
-- Test preventing "ON" join condition from writing to the database
BEGIN;
MERGE INTO t1
USING s1 ON t1.id = s1.id AND t1.id = 2 AND (merge_when_and_write())
WHEN MATCHED THEN
UPDATE SET val = t1.val + s1.val;
ERROR: functions used in the WHERE/ON/WHEN clause of modification queries on distributed tables must not be VOLATILE
ROLLBACK;
-- Test preventing WHEN clause(s) from writing to the database
BEGIN;
MERGE INTO t1
USING s1 ON t1.id = s1.id AND t1.id = 2
WHEN MATCHED AND (merge_when_and_write()) THEN
UPDATE SET val = t1.val + s1.val;
ERROR: functions used in the WHERE/ON/WHEN clause of modification queries on distributed tables must not be VOLATILE
ROLLBACK;
-- Joining on partition columns with sub-query
MERGE INTO t1
USING (SELECT * FROM s1) sub ON (sub.val = t1.id) -- sub.val is not a distribution column
@@ -1669,7 +1284,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (sub.id, sub.val);
ERROR: MERGE command is only supported when distributed tables are joined on their distribution column
ERROR: MERGE command is not supported on distributed/reference tables yet
-- Joining on partition columns with CTE
WITH s1_res AS (
SELECT * FROM s1
@@ -1682,7 +1297,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1_res.id, s1_res.val);
ERROR: MERGE command is only supported when distributed tables are joined on their distribution column
ERROR: MERGE command is not supported on distributed/reference tables yet
-- Constant Join condition
WITH s1_res AS (
SELECT * FROM s1
@@ -1695,7 +1310,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1_res.id, s1_res.val);
ERROR: MERGE command is only supported when distributed tables are joined on their distribution column
ERROR: MERGE command is not supported on distributed/reference tables yet
-- With a single WHEN clause, which causes a non-left join
WITH s1_res AS (
SELECT * FROM s1
@@ -1704,7 +1319,7 @@ WITH s1_res AS (
WHEN MATCHED THEN DELETE
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1_res.id, s1_res.val);
ERROR: MERGE command is only supported when distributed tables are joined on their distribution column
ERROR: MERGE command is not supported on distributed/reference tables yet
--
-- Reference tables
--
@@ -1756,7 +1371,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1.id, s1.val);
ERROR: MERGE command is not supported on reference tables yet
ERROR: MERGE command is not supported on distributed/reference tables yet
--
-- Postgres + Citus-Distributed table
--
@@ -1798,7 +1413,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1.id, s1.val);
ERROR: MERGE command is not supported with combination of distributed/local tables yet
ERROR: MERGE command is not supported on distributed/reference tables yet
MERGE INTO t1
USING (SELECT * FROM s1) sub ON (sub.id = t1.id)
WHEN MATCHED AND sub.val = 0 THEN
@@ -1807,7 +1422,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (sub.id, sub.val);
ERROR: MERGE command is not supported with combination of distributed/local tables yet
ERROR: MERGE command is not supported on distributed/reference tables yet
CREATE TABLE pg(val int);
SELECT create_distributed_table('s1', 'id');
NOTICE: Copying data from local table...
@@ -1828,7 +1443,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (sub.id, sub.val);
ERROR: MERGE command is not supported with combination of distributed/local tables yet
ERROR: MERGE command is not supported on distributed/reference tables yet
-- Mix Postgres table in CTE
WITH pg_res AS (
SELECT * FROM pg
@@ -1841,7 +1456,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (sub.id, sub.val);
ERROR: MERGE command is not supported with combination of distributed/local tables yet
ERROR: MERGE command is not supported on distributed/reference tables yet
-- Match more than one source row should fail same as Postgres behavior
SELECT undistribute_table('t1');
NOTICE: creating a new table for merge_schema.t1
@@ -1896,265 +1511,6 @@ WHEN NOT MATCHED THEN
INSERT VALUES(mv_source.id, mv_source.val);
ERROR: cannot execute MERGE on relation "mv_source"
DETAIL: This operation is not supported for materialized views.
-- Distributed tables *must* be colocated
CREATE TABLE dist_target(id int, val varchar);
SELECT create_distributed_table('dist_target', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE dist_source(id int, val varchar);
SELECT create_distributed_table('dist_source', 'id', colocate_with => 'none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
ERROR: For MERGE command, all the distributed tables must be colocated
-- Distributed tables *must* be joined on distribution column
CREATE TABLE dist_colocated(id int, val int);
SELECT create_distributed_table('dist_colocated', 'id', colocate_with => 'dist_target');
create_distributed_table
---------------------------------------------------------------------
(1 row)
MERGE INTO dist_target
USING dist_colocated
ON dist_target.id = dist_colocated.val -- val is not the distribution column
WHEN MATCHED THEN
UPDATE SET val = dist_colocated.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_colocated.id, dist_colocated.val);
ERROR: MERGE command is only supported when distributed tables are joined on their distribution column
-- MERGE command must be joined with a constant qual on the target relation
-- AND clause is missing
MERGE INTO dist_target
USING dist_colocated
ON dist_target.id = dist_colocated.id
WHEN MATCHED THEN
UPDATE SET val = dist_colocated.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_colocated.id, dist_colocated.val);
ERROR: MERGE on a distributed table requires a constant filter on the distribution column of the target table
HINT: Consider adding AND target.dist_key = <> to the ON clause
-- AND clause incorrect table (must be target)
MERGE INTO dist_target
USING dist_colocated
ON dist_target.id = dist_colocated.id AND dist_colocated.id = 1
WHEN MATCHED THEN
UPDATE SET val = dist_colocated.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_colocated.id, dist_colocated.val);
ERROR: MERGE on a distributed table requires a constant filter on the distribution column of the target table
HINT: Consider adding AND target.dist_key = <> to the ON clause
-- AND clause incorrect column (must be distribution column)
MERGE INTO dist_target
USING dist_colocated
ON dist_target.id = dist_colocated.id AND dist_target.val = 'const'
WHEN MATCHED THEN
UPDATE SET val = dist_colocated.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_colocated.id, dist_colocated.val);
ERROR: MERGE on a distributed table requires a constant filter on the distribution column of the target table
HINT: Consider adding AND target.dist_key = <> to the ON clause
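-- Illustrative sketch, not part of the original test: a MERGE that satisfies
-- this requirement pins the target's distribution column to a constant in the
-- ON clause, as the earlier target_cj tests do:
--   MERGE INTO dist_target
--   USING dist_colocated
--   ON dist_target.id = dist_colocated.id AND dist_target.id = 1
--   WHEN MATCHED THEN
--       UPDATE SET val = dist_colocated.val
--   WHEN NOT MATCHED THEN
--       INSERT VALUES(dist_colocated.id, dist_colocated.val);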
-- Both the source and target must be distributed
MERGE INTO dist_target
USING (SELECT 100 id) AS source
ON dist_target.id = source.id AND dist_target.val = 'const'
WHEN MATCHED THEN
UPDATE SET val = 'source'
WHEN NOT MATCHED THEN
INSERT VALUES(source.id, 'source');
ERROR: For MERGE command, both the source and target must be distributed
-- Non-hash distributed tables (append/range).
CREATE VIEW show_tables AS
SELECT logicalrelid, partmethod
FROM pg_dist_partition
WHERE (logicalrelid = 'dist_target'::regclass) OR (logicalrelid = 'dist_source'::regclass)
ORDER BY 1;
SELECT undistribute_table('dist_source');
NOTICE: creating a new table for merge_schema.dist_source
NOTICE: moving the data of merge_schema.dist_source
NOTICE: dropping the old merge_schema.dist_source
NOTICE: drop cascades to view show_tables
CONTEXT: SQL statement "DROP TABLE merge_schema.dist_source CASCADE"
NOTICE: renaming the new table to merge_schema.dist_source
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('dist_source', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM show_tables;
logicalrelid | partmethod
---------------------------------------------------------------------
dist_target | h
dist_source | a
(2 rows)
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
ERROR: For MERGE command, all the distributed tables must be colocated, for append/range distribution, colocation is not supported
HINT: Consider using hash distribution instead
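-- Illustrative sketch, not part of the original test: following the hint would
-- mean re-creating the source as a hash-distributed table colocated with the
-- target, e.g.:
--   SELECT undistribute_table('dist_source');
--   SELECT create_distributed_table('dist_source', 'id', colocate_with => 'dist_target');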
SELECT undistribute_table('dist_source');
NOTICE: creating a new table for merge_schema.dist_source
NOTICE: moving the data of merge_schema.dist_source
NOTICE: dropping the old merge_schema.dist_source
NOTICE: drop cascades to view show_tables
CONTEXT: SQL statement "DROP TABLE merge_schema.dist_source CASCADE"
NOTICE: renaming the new table to merge_schema.dist_source
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('dist_source', 'id', 'range');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM show_tables;
logicalrelid | partmethod
---------------------------------------------------------------------
dist_target | h
dist_source | r
(2 rows)
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
ERROR: For MERGE command, all the distributed tables must be colocated, for append/range distribution, colocation is not supported
HINT: Consider using hash distribution instead
-- Both are append tables
SELECT undistribute_table('dist_target');
NOTICE: creating a new table for merge_schema.dist_target
NOTICE: moving the data of merge_schema.dist_target
NOTICE: dropping the old merge_schema.dist_target
NOTICE: drop cascades to view show_tables
CONTEXT: SQL statement "DROP TABLE merge_schema.dist_target CASCADE"
NOTICE: renaming the new table to merge_schema.dist_target
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT undistribute_table('dist_source');
NOTICE: creating a new table for merge_schema.dist_source
NOTICE: moving the data of merge_schema.dist_source
NOTICE: dropping the old merge_schema.dist_source
NOTICE: drop cascades to view show_tables
CONTEXT: SQL statement "DROP TABLE merge_schema.dist_source CASCADE"
NOTICE: renaming the new table to merge_schema.dist_source
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('dist_target', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('dist_source', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM show_tables;
logicalrelid | partmethod
---------------------------------------------------------------------
dist_target | a
dist_source | a
(2 rows)
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
ERROR: For MERGE command, all the distributed tables must be colocated, for append/range distribution, colocation is not supported
HINT: Consider using hash distribution instead
-- Both are range tables
SELECT undistribute_table('dist_target');
NOTICE: creating a new table for merge_schema.dist_target
NOTICE: moving the data of merge_schema.dist_target
NOTICE: dropping the old merge_schema.dist_target
NOTICE: drop cascades to view show_tables
CONTEXT: SQL statement "DROP TABLE merge_schema.dist_target CASCADE"
NOTICE: renaming the new table to merge_schema.dist_target
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT undistribute_table('dist_source');
NOTICE: creating a new table for merge_schema.dist_source
NOTICE: moving the data of merge_schema.dist_source
NOTICE: dropping the old merge_schema.dist_source
NOTICE: drop cascades to view show_tables
CONTEXT: SQL statement "DROP TABLE merge_schema.dist_source CASCADE"
NOTICE: renaming the new table to merge_schema.dist_source
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('dist_target', 'id', 'range');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('dist_source', 'id', 'range');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM show_tables;
logicalrelid | partmethod
---------------------------------------------------------------------
dist_target | r
dist_source | r
(2 rows)
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
ERROR: For MERGE command, all the distributed tables must be colocated, for append/range distribution, colocation is not supported
HINT: Consider using hash distribution instead
DROP SERVER foreign_server CASCADE;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to user mapping for postgres on server foreign_server
@@ -2163,9 +1519,8 @@ drop cascades to foreign table foreign_table
NOTICE: foreign table "foreign_table_4000046" does not exist, skipping
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
DROP FUNCTION merge_when_and_write();
DROP SCHEMA merge_schema CASCADE;
NOTICE: drop cascades to 63 other objects
NOTICE: drop cascades to 56 other objects
DETAIL: drop cascades to function insert_data()
drop cascades to table pg_result
drop cascades to table local_local
@@ -2217,18 +1572,11 @@ drop cascades to table ft_target
drop cascades to table ft_source_4000045
drop cascades to table ft_source
drop cascades to extension postgres_fdw
drop cascades to table target_cj
drop cascades to table source_cj1
drop cascades to table source_cj2
drop cascades to table pg
drop cascades to table t1_4000078
drop cascades to table t1_4000062
drop cascades to table s1_4000079
drop cascades to table s1_4000063
drop cascades to table t1
drop cascades to table s1
drop cascades to table dist_colocated
drop cascades to table dist_target
drop cascades to table dist_source
drop cascades to view show_tables
SELECT 1 FROM master_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
@@ -711,21 +711,6 @@ SET LOCAL application_name to 'citus_internal gpid=10000000001';
SET citus.enable_ddl_propagation TO OFF;
-- alter table triggers SELECT, and auto_explain catches that
ALTER TABLE target_table ADD CONSTRAINT fkey_167 FOREIGN KEY (col_1) REFERENCES test_ref_table(key) ON DELETE CASCADE;
LOG: duration: xxxx ms plan:
{
"Query Text": "SELECT fk.\"col_1\" FROM ONLY \"test_auto_explain\".\"target_table\" fk LEFT OUTER JOIN ONLY \"test_auto_explain\".\"test_ref_table\" pk ON ( pk.\"key\" OPERATOR(pg_catalog.=) fk.\"col_1\") WHERE pk.\"key\" IS NULL AND (fk.\"col_1\" IS NOT NULL)",
"Plan": {
"Node Type": "Custom Scan",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Startup Cost": 0.00,
"Total Cost": xxxx,
"Plan Rows": 100000,
"Plan Width": 4,
"Citus Explain Scan": "Explain for triggered constraint validation queries during ALTER TABLE commands are not supported by Citus"
}
}
CONTEXT: SQL statement "SELECT fk."col_1" FROM ONLY "test_auto_explain"."target_table" fk LEFT OUTER JOIN ONLY "test_auto_explain"."test_ref_table" pk ON ( pk."key" OPERATOR(pg_catalog.=) fk."col_1") WHERE pk."key" IS NULL AND (fk."col_1" IS NOT NULL)"
END;
RESET citus.enable_ddl_propagation;
SET client_min_messages to ERROR;
@@ -44,6 +44,73 @@ SELECT con.conname
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_pkey;
-- Check "ADD PRIMARY KEY USING INDEX ..."
CREATE TABLE AT_AddConstNoName.tbl(col1 int, col2 int);
SELECT create_distributed_table('AT_AddConstNoName.tbl', 'col1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE UNIQUE INDEX my_index ON AT_AddConstNoName.tbl(col1);
ALTER TABLE AT_AddConstNoName.tbl ADD PRIMARY KEY USING INDEX my_index;
SELECT con.conname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'tbl';
conname
---------------------------------------------------------------------
my_index
(1 row)
\c - - :public_worker_1_host :worker_1_port
SELECT con.conname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname LIKE 'tbl%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
my_index
my_index_5410004
my_index_5410005
my_index_5410006
my_index_5410007
(5 rows)
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName.tbl DROP CONSTRAINT my_index;
-- Check "ADD UNIQUE USING INDEX ..."
CREATE UNIQUE INDEX my_index ON AT_AddConstNoName.tbl(col1);
ALTER TABLE AT_AddConstNoName.tbl ADD UNIQUE USING INDEX my_index;
SELECT con.conname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'tbl';
conname
---------------------------------------------------------------------
my_index
(1 row)
\c - - :public_worker_1_host :worker_1_port
SELECT con.conname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname LIKE 'tbl%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
my_index
my_index_5410004
my_index_5410005
my_index_5410006
my_index_5410007
(5 rows)
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName.tbl DROP CONSTRAINT my_index;
-- Check "ADD PRIMARY KEY DEFERRABLE" -- Check "ADD PRIMARY KEY DEFERRABLE"
ALTER TABLE AT_AddConstNoName.products ADD PRIMARY KEY(product_no) DEFERRABLE; ALTER TABLE AT_AddConstNoName.products ADD PRIMARY KEY(product_no) DEFERRABLE;
\c - - :public_worker_1_host :worker_1_port \c - - :public_worker_1_host :worker_1_port
@ -268,7 +335,7 @@ SELECT con.conname, con.connoinherit
(1 row) (1 row)
\c - - :public_worker_1_host :worker_1_port \c - - :public_worker_1_host :worker_1_port
SELECT con.conname, connoinherit SELECT con.conname, con.connoinherit
FROM pg_catalog.pg_constraint con FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
@ -278,6 +345,31 @@ SELECT con.conname, connoinherit
products_check_5410000 | t products_check_5410000 | t
(1 row) (1 row)
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check;
-- Check "ADD CHECK ... NOT VALID"
ALTER TABLE AT_AddConstNoName.products ADD CHECK (product_no > 0 AND price > 0) NOT VALID;
SELECT con.conname, con.convalidated
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'products';
conname | convalidated
---------------------------------------------------------------------
products_check | f
(1 row)
\c - - :public_worker_1_host :worker_1_port
SELECT con.conname, con.convalidated
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'products_5410000';
conname | convalidated
---------------------------------------------------------------------
products_check_5410000 | f
(1 row)
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check;
DROP TABLE AT_AddConstNoName.products;
@@ -404,10 +496,10 @@ SELECT con.conname
WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
verylonglonglonglonglonglonglonglonglonglonglo_559ab79d_5410006
verylonglonglonglonglonglonglonglonglonglonglo_559ab79d_5410007
verylonglonglonglonglonglonglonglonglonglonglo_559ab79d_5410008
verylonglonglonglonglonglonglonglonglonglonglo_559ab79d_5410009
verylonglonglonglonglonglonglonglonglonglonglo_559ab79d_5410010
verylonglonglonglonglonglonglonglonglonglonglo_559ab79d_5410011
verylonglonglonglonglonglonglonglonglonglonglo_559ab79d_5410012
verylonglonglonglonglonglonglonglonglonglonglo_559ab79d_5410013
verylonglonglonglonglonglonglonglonglonglonglonglonglonglo_pkey
(5 rows)
@@ -447,10 +539,10 @@ SELECT con.conname
WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
verylonglonglonglonglonglonglonglonglonglonglo_cd61b0cf_5410006
verylonglonglonglonglonglonglonglonglonglonglo_cd61b0cf_5410007
verylonglonglonglonglonglonglonglonglonglonglo_cd61b0cf_5410008
verylonglonglonglonglonglonglonglonglonglonglo_cd61b0cf_5410009
verylonglonglonglonglonglonglonglonglonglonglo_cd61b0cf_5410010
verylonglonglonglonglonglonglonglonglonglonglo_cd61b0cf_5410011
verylonglonglonglonglonglonglonglonglonglonglo_cd61b0cf_5410012
verylonglonglonglonglonglonglonglonglonglonglo_cd61b0cf_5410013
verylonglonglonglonglonglonglonglonglonglonglong_product_no_key
(5 rows)
@@ -490,10 +582,10 @@ SELECT con.conname
WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
verylonglonglonglonglonglonglonglonglonglonglo_057ed027_5410006
verylonglonglonglonglonglonglonglonglonglonglo_057ed027_5410007
verylonglonglonglonglonglonglonglonglonglonglo_057ed027_5410008
verylonglonglonglonglonglonglonglonglonglonglo_057ed027_5410009
verylonglonglonglonglonglonglonglonglonglonglo_057ed027_5410010
verylonglonglonglonglonglonglonglonglonglonglo_057ed027_5410011
verylonglonglonglonglonglonglonglonglonglonglo_057ed027_5410012
verylonglonglonglonglonglonglonglonglonglonglo_057ed027_5410013
verylonglonglonglonglonglonglonglonglonglonglon_product_no_excl
(5 rows)
@@ -533,10 +625,10 @@ SELECT con.conname
WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
verylonglonglonglonglonglonglonglonglonglonglo_d943e063_5410006
verylonglonglonglonglonglonglonglonglonglonglo_d943e063_5410007
verylonglonglonglonglonglonglonglonglonglonglo_d943e063_5410008
verylonglonglonglonglonglonglonglonglonglonglo_d943e063_5410009
verylonglonglonglonglonglonglonglonglonglonglo_d943e063_5410010
verylonglonglonglonglonglonglonglonglonglonglo_d943e063_5410011
verylonglonglonglonglonglonglonglonglonglonglo_d943e063_5410012
verylonglonglonglonglonglonglonglonglonglonglo_d943e063_5410013
verylonglonglonglonglonglonglonglonglonglonglonglonglongl_check
(5 rows)
@@ -593,10 +685,10 @@ SELECT con.conname
WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410014
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410015
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410016
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410017
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410018
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410019
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410020
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410021
longlonglonglonglonglonglonglonglonglonglonglonglonglonglo_pkey
(5 rows)
@@ -639,10 +731,10 @@ SELECT con.conname
WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410014
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410015
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410016
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410017
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410018
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410019
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410020
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410021
longlonglonglonglonglonglonglonglonglonglongl_partition_col_key
(5 rows)
@@ -795,7 +887,7 @@ SELECT con.conname
conname
---------------------------------------------------------------------
citus_local_table_pkey
citus_local_table_pkey_5410022
citus_local_table_pkey_5410026
(2 rows)
SELECT create_distributed_table('AT_AddConstNoName.citus_local_table','id');
@@ -823,10 +915,10 @@ SELECT con.conname
conname
---------------------------------------------------------------------
citus_local_table_pkey
citus_local_table_pkey_5410023
citus_local_table_pkey_5410024
citus_local_table_pkey_5410025
citus_local_table_pkey_5410026
citus_local_table_pkey_5410027
citus_local_table_pkey_5410028
citus_local_table_pkey_5410029
citus_local_table_pkey_5410030
(5 rows)
\c - - :master_host :master_port
@@ -854,10 +946,10 @@ SELECT con.conname
conname
---------------------------------------------------------------------
citus_local_table_id_key
citus_local_table_id_key_5410023
citus_local_table_id_key_5410024
citus_local_table_id_key_5410025
citus_local_table_id_key_5410026
citus_local_table_id_key_5410027
citus_local_table_id_key_5410028
citus_local_table_id_key_5410029
citus_local_table_id_key_5410030
(5 rows)
\c - - :master_host :master_port
@@ -895,10 +987,10 @@ SELECT con.conname
conname
---------------------------------------------------------------------
citus_local_table_id_excl
citus_local_table_id_excl_5410023
citus_local_table_id_excl_5410024
citus_local_table_id_excl_5410025
citus_local_table_id_excl_5410026
citus_local_table_id_excl_5410027
citus_local_table_id_excl_5410028
citus_local_table_id_excl_5410029
citus_local_table_id_excl_5410030
(5 rows)
\c - - :master_host :master_port
@@ -936,10 +1028,10 @@ SELECT con.conname
conname
---------------------------------------------------------------------
citus_local_table_check
citus_local_table_check_5410023
citus_local_table_check_5410024
citus_local_table_check_5410025
citus_local_table_check_5410026
citus_local_table_check_5410027
citus_local_table_check_5410028
citus_local_table_check_5410029
citus_local_table_check_5410030
(5 rows)
\c - - :master_host :master_port
@@ -989,10 +1081,10 @@ SELECT con.conname
WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410034
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410035
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410036
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410037
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410038
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410039
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410040
longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410041
longlonglonglonglonglonglonglonglonglonglonglonglonglonglo_pkey
(5 rows)
@@ -1026,10 +1118,10 @@ SELECT con.conname
WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC;
conname
---------------------------------------------------------------------
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410034
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410035
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410036
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410037
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410038
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410039
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410040
longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410041
longlonglonglonglonglonglonglonglonglonglongl_partition_col_key
(5 rows)
@@ -1119,10 +1211,10 @@ SELECT con.conname
conname
---------------------------------------------------------------------
2nd table_pkey
2nd table_pkey_5410042
2nd table_pkey_5410043
2nd table_pkey_5410044
2nd table_pkey_5410045
2nd table_pkey_5410046
2nd table_pkey_5410047
2nd table_pkey_5410048
2nd table_pkey_5410049
(5 rows)
\c - - :master_host :master_port
@@ -1149,10 +1241,10 @@ SELECT con.conname
conname
---------------------------------------------------------------------
2nd table_2nd id_3rd id_key
2nd table_2nd id_3rd id_key_5410042
2nd table_2nd id_3rd id_key_5410043
2nd table_2nd id_3rd id_key_5410044
2nd table_2nd id_3rd id_key_5410045
2nd table_2nd id_3rd id_key_5410046
2nd table_2nd id_3rd id_key_5410047
2nd table_2nd id_3rd id_key_5410048
2nd table_2nd id_3rd id_key_5410049
(5 rows)
\c - - :master_host :master_port
@@ -1179,10 +1271,10 @@ SELECT con.conname
conname
---------------------------------------------------------------------
2nd table_2nd id_excl
2nd table_2nd id_excl_5410042
2nd table_2nd id_excl_5410043
2nd table_2nd id_excl_5410044
2nd table_2nd id_excl_5410045
2nd table_2nd id_excl_5410046
2nd table_2nd id_excl_5410047
2nd table_2nd id_excl_5410048
2nd table_2nd id_excl_5410049
(5 rows)
\c - - :master_host :master_port
@@ -1209,20 +1301,21 @@ SELECT con.conname
conname
---------------------------------------------------------------------
2nd table_check
2nd table_check_5410042
2nd table_check_5410043
2nd table_check_5410044
2nd table_check_5410045
2nd table_check_5410046
2nd table_check_5410047
2nd table_check_5410048
2nd table_check_5410049
(5 rows)
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_check";
DROP EXTENSION btree_gist;
DROP SCHEMA AT_AddConstNoName CASCADE;
NOTICE: drop cascades to 6 other objects
NOTICE: drop cascades to 7 other objects
DETAIL: drop cascades to table at_addconstnoname.products_ref_2
DETAIL: drop cascades to table at_addconstnoname.tbl
drop cascades to table at_addconstnoname.products_ref_2
drop cascades to table at_addconstnoname.products_ref_3
drop cascades to table at_addconstnoname.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon
drop cascades to table at_addconstnoname.products_ref_3_5410005
drop cascades to table at_addconstnoname.products_ref_3_5410009
drop cascades to table at_addconstnoname.citus_local_partitioned_table
drop cascades to table at_addconstnoname."2nd table"
@@ -248,6 +248,34 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
referencing_table_ref_id_fkey_1770043 | a | c | s
(3 rows)
\c - - :master_host :master_port
SET SEARCH_PATH = at_add_fk;
ALTER TABLE referencing_table DROP CONSTRAINT referencing_table_ref_id_fkey;
-- test NOT VALID
ALTER TABLE referencing_table ADD FOREIGN KEY(ref_id) REFERENCES referenced_table(id) NOT VALID;
SELECT con.conname, con.convalidated
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'referencing_table';
conname | convalidated
---------------------------------------------------------------------
referencing_table_ref_id_fkey | f
(1 row)
\c - - :public_worker_1_host :worker_1_port
SELECT con.conname, con.convalidated
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname LIKE 'referencing_table%' ORDER BY con.conname ASC;
conname | convalidated
---------------------------------------------------------------------
referencing_table_ref_id_fkey | f
referencing_table_ref_id_fkey_1770041 | f
referencing_table_ref_id_fkey_1770043 | f
(3 rows)
\c - - :master_host :master_port
SET SEARCH_PATH = at_add_fk;
ALTER TABLE referencing_table DROP CONSTRAINT referencing_table_ref_id_fkey;
@@ -773,6 +773,14 @@ SELECT 1 FROM columnar_table; -- seq scan
ERROR: loaded Citus library version differs from installed extension version
CREATE TABLE new_columnar_table (a int) USING columnar;
ERROR: loaded Citus library version differs from installed extension version
-- disable version checks for other sessions too
ALTER SYSTEM SET citus.enable_version_checks TO OFF;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
-- do cleanup for the rest of the tests
SET citus.enable_version_checks TO OFF;
SET columnar.enable_version_checks TO OFF;
@@ -1303,12 +1311,28 @@ SELECT * FROM multi_extension.print_extension_changes();
| type cluster_clock
(38 rows)
-- Test downgrade to 11.2-1 from 11.3-1
ALTER EXTENSION citus UPDATE TO '11.3-1';
ALTER EXTENSION citus UPDATE TO '11.2-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
(0 rows)
-- Snapshot of state at 11.3-1
ALTER EXTENSION citus UPDATE TO '11.3-1';
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
(0 rows)
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
SHOW citus.version;
citus.version
---------------------------------------------------------------------
11.2devel
11.3devel
(1 row)
-- ensure no unexpected objects were created outside pg_catalog
@@ -1329,11 +1353,19 @@ ORDER BY 1, 2;
-- see incompatible version errors out
RESET citus.enable_version_checks;
RESET columnar.enable_version_checks;
-- reset version check config for other sessions too
ALTER SYSTEM RESET citus.enable_version_checks;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
DROP EXTENSION citus;
DROP EXTENSION citus_columnar;
CREATE EXTENSION citus VERSION '8.0-1';
ERROR: specified version incompatible with loaded Citus library
DETAIL: Loaded library requires 11.2, but 8.0-1 was specified.
DETAIL: Loaded library requires 11.3, but 8.0-1 was specified.
HINT: If a newer library is present, restart the database and try the command again.
-- Test non-distributed queries work even in version mismatch
SET citus.enable_version_checks TO 'false';
@@ -1378,7 +1410,7 @@ ORDER BY 1;
-- We should not distribute table in version mismatch
SELECT create_distributed_table('version_mismatch_table', 'column1');
ERROR: loaded Citus library version differs from installed extension version
DETAIL: Loaded library requires 11.2, but the installed extension version is 8.1-1.
DETAIL: Loaded library requires 11.3, but the installed extension version is 8.1-1.
HINT: Run ALTER EXTENSION citus UPDATE and try again.
-- This function will cause the next ALTER EXTENSION to fail
CREATE OR REPLACE FUNCTION pg_catalog.relation_is_a_known_shard(regclass)
@@ -0,0 +1,307 @@
-- multi recursive queries with joins, subqueries, and ctes
CREATE SCHEMA multi_recursive;
SET search_path TO multi_recursive;
DROP TABLE IF EXISTS tbl_dist1;
NOTICE: table "tbl_dist1" does not exist, skipping
CREATE TABLE tbl_dist1(id int);
SELECT create_distributed_table('tbl_dist1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE IF EXISTS tbl_ref1;
NOTICE: table "tbl_ref1" does not exist, skipping
CREATE TABLE tbl_ref1(id int);
SELECT create_reference_table('tbl_ref1');
create_reference_table
---------------------------------------------------------------------
(1 row)
INSERT INTO tbl_dist1 SELECT i FROM generate_series(0,10) i;
INSERT INTO tbl_ref1 SELECT i FROM generate_series(0,10) i;
-- https://github.com/citusdata/citus/issues/6653
-- The reasons why the inlined queries fail are all the same: after the query is modified in the first pass, the second
-- pass finds noncolocated queries, since we do not create equivalences between nondistributed and distributed tables.
-- QUERY1
-- The recursive planner makes multiple passes over the query and fails.
-- Why did the inlined query fail?
-- The limit clause is recursively planned in the inlined CTE; the first pass finishes here. In the second pass, the
-- noncolocated queries and the recurring full join are recursively planned. We detect that and throw an error.
SELECT t1.id
FROM (
SELECT t2.id
FROM (
SELECT t0.id
FROM tbl_dist1 t0
LIMIT 5
) AS t2
INNER JOIN tbl_dist1 AS t3 USING (id)
) AS t1
FULL JOIN tbl_dist1 t4 USING (id);
ERROR: recursive complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
-- QUERY2
-- The recursive planner makes multiple passes over the query with the inlined CTE and fails. The CTE is then planned
-- without inlining, and that succeeds.
-- Why did the inlined query fail?
-- The recurring left join is recursively planned in the inlined CTE, and the limit clause then causes another round of
-- recursive planning; the first pass finishes here. In the second pass, the noncolocated queries and the recurring
-- right join are recursively planned. We detect that and throw an error.
SET client_min_messages TO DEBUG1;
WITH cte_0 AS (
SELECT id FROM tbl_dist1 WHERE id IN (
SELECT id FROM tbl_ref1
LEFT JOIN tbl_dist1 USING (id)
)
)
SELECT count(id) FROM tbl_dist1
RIGHT JOIN (
SELECT table_5.id FROM (
SELECT id FROM cte_0 LIMIT 0
) AS table_5
RIGHT JOIN tbl_dist1 USING (id)
) AS table_4 USING (id);
DEBUG: CTE cte_0 is going to be inlined via distributed planning
DEBUG: recursively planning right side of the left join since the outer side is a recurring rel
DEBUG: recursively planning distributed relation "tbl_dist1" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "tbl_dist1" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT id FROM multi_recursive.tbl_dist1 WHERE true
DEBUG: push down of limit count: 0
DEBUG: generating subplan XXX_2 for subquery SELECT id FROM (SELECT tbl_dist1.id FROM multi_recursive.tbl_dist1 WHERE (tbl_dist1.id OPERATOR(pg_catalog.=) ANY (SELECT tbl_ref1.id FROM (multi_recursive.tbl_ref1 LEFT JOIN (SELECT tbl_dist1_2.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) tbl_dist1_2) tbl_dist1_1 USING (id))))) cte_0 LIMIT 0
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(table_4.id) AS count FROM (multi_recursive.tbl_dist1 RIGHT JOIN (SELECT table_5.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5 RIGHT JOIN multi_recursive.tbl_dist1 tbl_dist1_1 USING (id))) table_4 USING (id))
DEBUG: generating subplan XXX_1 for subquery SELECT table_5.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5 RIGHT JOIN multi_recursive.tbl_dist1 USING (id))
DEBUG: recursively planning left side of the right join since the outer side is a recurring rel
DEBUG: recursively planning distributed relation "tbl_dist1" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "tbl_dist1" to a subquery
DEBUG: generating subplan XXX_2 for subquery SELECT id FROM multi_recursive.tbl_dist1 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(table_4.id) AS count FROM ((SELECT tbl_dist1_1.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) tbl_dist1_1) tbl_dist1 RIGHT JOIN (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_4 USING (id))
DEBUG: generating subplan XXX_1 for CTE cte_0: SELECT id FROM multi_recursive.tbl_dist1 WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT tbl_ref1.id FROM (multi_recursive.tbl_ref1 LEFT JOIN multi_recursive.tbl_dist1 tbl_dist1_1 USING (id))))
DEBUG: recursively planning right side of the left join since the outer side is a recurring rel
DEBUG: recursively planning distributed relation "tbl_dist1" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "tbl_dist1" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT id FROM multi_recursive.tbl_dist1 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT id FROM multi_recursive.tbl_dist1 WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT tbl_ref1.id FROM (multi_recursive.tbl_ref1 LEFT JOIN (SELECT tbl_dist1_2.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) tbl_dist1_2) tbl_dist1_1 USING (id))))
DEBUG: generating subplan XXX_2 for subquery SELECT id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) cte_0 LIMIT 0
DEBUG: generating subplan XXX_3 for subquery SELECT table_5.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5 RIGHT JOIN multi_recursive.tbl_dist1 USING (id))
DEBUG: recursively planning left side of the right join since the outer side is a recurring rel
DEBUG: recursively planning distributed relation "tbl_dist1" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "tbl_dist1" to a subquery
DEBUG: generating subplan XXX_4 for subquery SELECT id FROM multi_recursive.tbl_dist1 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(table_4.id) AS count FROM ((SELECT tbl_dist1_1.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) tbl_dist1_1) tbl_dist1 RIGHT JOIN (SELECT intermediate_result.id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_4 USING (id))
count
---------------------------------------------------------------------
0
(1 row)
RESET client_min_messages;
DROP TABLE IF EXISTS dist0;
NOTICE: table "dist0" does not exist, skipping
CREATE TABLE dist0(id int);
SELECT create_distributed_table('dist0','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE IF EXISTS dist1;
NOTICE: table "dist1" does not exist, skipping
CREATE TABLE dist1(id int);
SELECT create_distributed_table('dist1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO dist0 SELECT i FROM generate_series(1005,1025) i;
INSERT INTO dist1 SELECT i FROM generate_series(1015,1035) i;
-- QUERY3
-- The recursive planner makes multiple passes over the query with the inlined CTE and fails. The CTE is then planned
-- without inlining, and that succeeds.
-- Why did the inlined query fail?
-- The noncolocated queries are recursively planned; the first pass finishes here. The second pass also recursively
-- plans the noncolocated queries and the recurring full join. We detect that and throw an error.
SET client_min_messages TO DEBUG1;
WITH cte_0 AS (
SELECT id FROM dist0
RIGHT JOIN dist0 AS table_1 USING (id)
ORDER BY id
)
SELECT avg(avgsub.id) FROM (
SELECT table_2.id FROM (
SELECT table_3.id FROM (
SELECT table_5.id FROM cte_0 AS table_5, dist1
) AS table_3 INNER JOIN dist1 USING (id)
) AS table_2 FULL JOIN dist0 USING (id)
) AS avgsub;
DEBUG: CTE cte_0 is going to be inlined via distributed planning
DEBUG: generating subplan XXX_1 for subquery SELECT table_1.id FROM (multi_recursive.dist0 RIGHT JOIN multi_recursive.dist0 table_1 USING (id)) ORDER BY table_1.id
DEBUG: generating subplan XXX_2 for subquery SELECT table_5.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5, multi_recursive.dist1
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_2.id FROM ((SELECT table_3.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_3 JOIN multi_recursive.dist1 USING (id))) table_2 FULL JOIN multi_recursive.dist0 USING (id))) avgsub
DEBUG: generating subplan XXX_1 for subquery SELECT table_3.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_3 JOIN multi_recursive.dist1 USING (id))
DEBUG: recursively planning right side of the full join since the other side is a recurring rel
DEBUG: recursively planning distributed relation "dist0" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "dist0" to a subquery
DEBUG: generating subplan XXX_2 for subquery SELECT id FROM multi_recursive.dist0 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_2.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_2 FULL JOIN (SELECT dist0_1.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) dist0_1) dist0 USING (id))) avgsub
DEBUG: generating subplan XXX_1 for CTE cte_0: SELECT table_1.id FROM (multi_recursive.dist0 RIGHT JOIN multi_recursive.dist0 table_1 USING (id)) ORDER BY table_1.id
DEBUG: generating subplan XXX_1 for subquery SELECT table_5.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5, multi_recursive.dist1
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table_3.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_3 JOIN multi_recursive.dist1 USING (id))
DEBUG: generating subplan XXX_2 for subquery SELECT table_3.id FROM ((SELECT table_5.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5, multi_recursive.dist1 dist1_1) table_3 JOIN multi_recursive.dist1 USING (id))
DEBUG: recursively planning right side of the full join since the other side is a recurring rel
DEBUG: recursively planning distributed relation "dist0" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "dist0" to a subquery
DEBUG: generating subplan XXX_3 for subquery SELECT id FROM multi_recursive.dist0 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_2.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_2 FULL JOIN (SELECT dist0_1.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) dist0_1) dist0 USING (id))) avgsub
avg
---------------------------------------------------------------------
1020.0000000000000000
(1 row)
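-- Editor's sketch, not part of the original test: the successful fallback above can be requested
-- up front with PostgreSQL's MATERIALIZED keyword, which keeps the CTE from being inlined so it is
-- planned as an intermediate result in the first pass:
WITH cte_0 AS MATERIALIZED (
    SELECT id FROM dist0 RIGHT JOIN dist0 AS table_1 USING (id)
)
SELECT avg(table_5.id) FROM cte_0 AS table_5, dist1;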
RESET client_min_messages;
DROP TABLE IF EXISTS dist0;
CREATE TABLE dist0(id int);
SELECT create_distributed_table('dist0','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE IF EXISTS dist1;
CREATE TABLE dist1(id int);
SELECT create_distributed_table('dist1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO dist0 SELECT i FROM generate_series(0,10) i;
INSERT INTO dist0 SELECT * FROM dist0 ORDER BY id LIMIT 1;
INSERT INTO dist1 SELECT i FROM generate_series(0,10) i;
INSERT INTO dist1 SELECT * FROM dist1 ORDER BY id LIMIT 1;
-- QUERY4
-- The recursive planner takes multiple passes over the query and fails.
-- Why did the query fail?
-- The LIMIT clause is recursively planned in the first pass. In the second pass, non-colocated subqueries are recursively planned.
-- We detect that and throw an error.
SET client_min_messages TO DEBUG1;
SELECT avg(avgsub.id) FROM (
SELECT table_0.id FROM (
SELECT table_1.id FROM (
SELECT table_2.id FROM (
SELECT table_3.id FROM (
SELECT table_4.id FROM dist0 AS table_4
LEFT JOIN dist1 AS table_5 USING (id)
) AS table_3 INNER JOIN dist0 AS table_6 USING (id)
) AS table_2 WHERE table_2.id < 10 ORDER BY id LIMIT 47
) AS table_1 RIGHT JOIN dist0 AS table_7 USING (id)
) AS table_0 RIGHT JOIN dist1 AS table_8 USING (id)
) AS avgsub;
DEBUG: push down of limit count: 47
DEBUG: generating subplan XXX_1 for subquery SELECT id FROM (SELECT table_3.id FROM ((SELECT table_4.id FROM (multi_recursive.dist0 table_4 LEFT JOIN multi_recursive.dist1 table_5 USING (id))) table_3 JOIN multi_recursive.dist0 table_6 USING (id))) table_2 WHERE (id OPERATOR(pg_catalog.<) 10) ORDER BY id LIMIT 47
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_0.id FROM ((SELECT table_1.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_1 RIGHT JOIN multi_recursive.dist0 table_7 USING (id))) table_0 RIGHT JOIN multi_recursive.dist1 table_8 USING (id))) avgsub
DEBUG: generating subplan XXX_1 for subquery SELECT table_1.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_1 RIGHT JOIN multi_recursive.dist0 table_7 USING (id))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_0.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_0 RIGHT JOIN multi_recursive.dist1 table_8 USING (id))) avgsub
ERROR: recursive complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
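-- Editor's sketch for contrast (assuming dist0 and dist1 keep the default colocation from their
-- creation above): joining the two distributed tables directly on their distribution column is
-- pushed down in a single pass, so no multipass error can arise.
SELECT avg(id) FROM dist0 JOIN dist1 USING (id);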
-- QUERY5
-- The recursive planner takes multiple passes over the query with the inlined CTE and fails. The CTE is then planned without inlining, and the query succeeds.
-- Why did the inlined query fail?
-- The LIMIT clause is recursively planned; the first pass finishes there. In the second pass, the non-colocated tables and the recurring full join
-- are recursively planned. We detect that and throw an error.
WITH cte_0 AS (
SELECT table_0.id FROM dist1 AS table_0 LEFT JOIN dist1 AS table_1 USING (id) ORDER BY id LIMIT 41
)
SELECT avg(avgsub.id) FROM (
SELECT table_4.id FROM (
SELECT table_5.id FROM (
SELECT table_6.id FROM cte_0 AS table_6
) AS table_5
INNER JOIN dist0 USING (id) INNER JOIN dist1 AS table_9 USING (id)
) AS table_4 FULL JOIN dist0 USING (id)
) AS avgsub;
DEBUG: CTE cte_0 is going to be inlined via distributed planning
DEBUG: push down of limit count: 41
DEBUG: generating subplan XXX_1 for subquery SELECT table_0.id FROM (multi_recursive.dist1 table_0 LEFT JOIN multi_recursive.dist1 table_1 USING (id)) ORDER BY table_0.id LIMIT 41
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_4.id FROM ((SELECT table_5.id FROM (((SELECT table_6.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6) table_5 JOIN multi_recursive.dist0 dist0_1 USING (id)) JOIN multi_recursive.dist1 table_9 USING (id))) table_4 FULL JOIN multi_recursive.dist0 USING (id))) avgsub
DEBUG: generating subplan XXX_1 for subquery SELECT table_5.id FROM (((SELECT table_6.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6) table_5 JOIN multi_recursive.dist0 USING (id)) JOIN multi_recursive.dist1 table_9 USING (id))
DEBUG: recursively planning right side of the full join since the other side is a recurring rel
DEBUG: recursively planning distributed relation "dist0" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "dist0" to a subquery
DEBUG: generating subplan XXX_2 for subquery SELECT id FROM multi_recursive.dist0 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_4.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_4 FULL JOIN (SELECT dist0_1.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) dist0_1) dist0 USING (id))) avgsub
DEBUG: generating subplan XXX_1 for CTE cte_0: SELECT table_0.id FROM (multi_recursive.dist1 table_0 LEFT JOIN multi_recursive.dist1 table_1 USING (id)) ORDER BY table_0.id LIMIT 41
DEBUG: push down of limit count: 41
DEBUG: generating subplan XXX_2 for subquery SELECT table_5.id FROM (((SELECT table_6.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6) table_5 JOIN multi_recursive.dist0 USING (id)) JOIN multi_recursive.dist1 table_9 USING (id))
DEBUG: recursively planning right side of the full join since the other side is a recurring rel
DEBUG: recursively planning distributed relation "dist0" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "dist0" to a subquery
DEBUG: generating subplan XXX_3 for subquery SELECT id FROM multi_recursive.dist0 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_4.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_4 FULL JOIN (SELECT dist0_1.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) dist0_1) dist0 USING (id))) avgsub
avg
---------------------------------------------------------------------
1.3095238095238095
(1 row)
-- QUERY6
-- The recursive planner takes multiple passes over the query with the inlined CTE and fails. The CTE is then planned without inlining, and the query succeeds.
-- Why did the inlined query fail?
-- Same query and flow as above, but with the explicit NOT MATERIALIZED option, which makes the CTE directly inlinable. Even though the
-- planner fails with the inlined query, it succeeds without inlining.
WITH cte_0 AS NOT MATERIALIZED (
SELECT table_0.id FROM dist1 AS table_0 LEFT JOIN dist1 AS table_1 USING (id) ORDER BY id LIMIT 41
)
SELECT avg(avgsub.id) FROM (
SELECT table_4.id FROM (
SELECT table_5.id FROM (
SELECT table_6.id FROM cte_0 AS table_6
) AS table_5
INNER JOIN dist0 USING (id) INNER JOIN dist1 AS table_9 USING (id)
) AS table_4 FULL JOIN dist0 USING (id)
) AS avgsub;
DEBUG: CTE cte_0 is going to be inlined via distributed planning
DEBUG: push down of limit count: 41
DEBUG: generating subplan XXX_1 for subquery SELECT table_0.id FROM (multi_recursive.dist1 table_0 LEFT JOIN multi_recursive.dist1 table_1 USING (id)) ORDER BY table_0.id LIMIT 41
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_4.id FROM ((SELECT table_5.id FROM (((SELECT table_6.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6) table_5 JOIN multi_recursive.dist0 dist0_1 USING (id)) JOIN multi_recursive.dist1 table_9 USING (id))) table_4 FULL JOIN multi_recursive.dist0 USING (id))) avgsub
DEBUG: generating subplan XXX_1 for subquery SELECT table_5.id FROM (((SELECT table_6.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6) table_5 JOIN multi_recursive.dist0 USING (id)) JOIN multi_recursive.dist1 table_9 USING (id))
DEBUG: recursively planning right side of the full join since the other side is a recurring rel
DEBUG: recursively planning distributed relation "dist0" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "dist0" to a subquery
DEBUG: generating subplan XXX_2 for subquery SELECT id FROM multi_recursive.dist0 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_4.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_4 FULL JOIN (SELECT dist0_1.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) dist0_1) dist0 USING (id))) avgsub
DEBUG: generating subplan XXX_1 for CTE cte_0: SELECT table_0.id FROM (multi_recursive.dist1 table_0 LEFT JOIN multi_recursive.dist1 table_1 USING (id)) ORDER BY table_0.id LIMIT 41
DEBUG: push down of limit count: 41
DEBUG: generating subplan XXX_2 for subquery SELECT table_5.id FROM (((SELECT table_6.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6) table_5 JOIN multi_recursive.dist0 USING (id)) JOIN multi_recursive.dist1 table_9 USING (id))
DEBUG: recursively planning right side of the full join since the other side is a recurring rel
DEBUG: recursively planning distributed relation "dist0" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "dist0" to a subquery
DEBUG: generating subplan XXX_3 for subquery SELECT id FROM multi_recursive.dist0 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(id) AS avg FROM (SELECT table_4.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_4 FULL JOIN (SELECT dist0_1.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) dist0_1) dist0 USING (id))) avgsub
avg
---------------------------------------------------------------------
1.3095238095238095
(1 row)
-- QUERY7
-- The recursive planner takes multiple passes over the query and fails. Note that the CTE is not used in the query.
-- Why did the query fail?
-- The LIMIT clause is recursively planned; the first pass finishes there. In the second pass, non-colocated subqueries are recursively planned.
-- We detect the multiple passes and throw an error.
WITH cte_0 AS (
SELECT table_0.id FROM dist1 AS table_0 FULL JOIN dist1 AS table_1 USING (id)
)
SELECT avg(table_5.id) FROM (
SELECT table_6.id FROM (
SELECT table_7.id FROM dist0 AS table_7 ORDER BY id LIMIT 87
) AS table_6 INNER JOIN dist0 AS table_8 USING (id) WHERE table_8.id < 0 ORDER BY id
) AS table_5 INNER JOIN dist0 AS table_9 USING (id);
DEBUG: push down of limit count: 87
DEBUG: generating subplan XXX_1 for subquery SELECT id FROM multi_recursive.dist0 table_7 ORDER BY id LIMIT 87
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(table_5.id) AS avg FROM ((SELECT table_6.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6 JOIN multi_recursive.dist0 table_8 USING (id)) WHERE (table_8.id OPERATOR(pg_catalog.<) 0) ORDER BY table_6.id) table_5 JOIN multi_recursive.dist0 table_9 USING (id))
DEBUG: generating subplan XXX_1 for subquery SELECT table_6.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6 JOIN multi_recursive.dist0 table_8 USING (id)) WHERE (table_8.id OPERATOR(pg_catalog.<) 0) ORDER BY table_6.id
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(table_5.id) AS avg FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5 JOIN multi_recursive.dist0 table_9 USING (id))
ERROR: recursive complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
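-- Editor's sketch for inspecting the colocation assumption behind the error above, using the
-- standard Citus catalog (colocated tables share a colocationid):
SELECT logicalrelid, colocationid
FROM pg_dist_partition
WHERE logicalrelid IN ('dist0'::regclass, 'dist1'::regclass);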
RESET client_min_messages;
DROP SCHEMA multi_recursive CASCADE;
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table tbl_dist1
drop cascades to table tbl_ref1
drop cascades to table dist0
drop cascades to table dist1
@ -177,7 +177,7 @@ INSERT INTO limit_orders VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:
INSERT INTO limit_orders VALUES (2036, 'GOOG', 5634, now(), 'buy', random());
-- commands with mutable functions in their quals
DELETE FROM limit_orders WHERE id = 246 AND bidder_id = (random() * 1000);
ERROR: functions used in the WHERE/ON/WHEN clause of modification queries on distributed tables must not be VOLATILE
ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE
-- commands with mutable but non-volatile functions(ie: stable func.) in their quals
-- (the cast to timestamp is because the timestamp_eq_timestamptz operator is stable)
DELETE FROM limit_orders WHERE id = 246 AND placed_at = current_timestamp::timestamp;
@ -95,7 +95,7 @@ INSERT INTO limit_orders_mx VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:
INSERT INTO limit_orders_mx VALUES (2036, 'GOOG', 5634, now(), 'buy', random());
-- commands with mutable functions in their quals
DELETE FROM limit_orders_mx WHERE id = 246 AND bidder_id = (random() * 1000);
ERROR: functions used in the WHERE/ON/WHEN clause of modification queries on distributed tables must not be VOLATILE
ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE
-- commands with mutable but non-volatile functions(ie: stable func.) in their quals
-- (the cast to timestamp is because the timestamp_eq_timestamptz operator is stable)
DELETE FROM limit_orders_mx WHERE id = 246 AND placed_at = current_timestamp::timestamp;

View File

@ -1020,7 +1020,8 @@ SELECT
FROM
reference_table_test, colocated_table_test
WHERE
colocated_table_test.value_1 = reference_table_test.value_1;
colocated_table_test.value_1 = reference_table_test.value_1
ORDER BY 1;
LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ]
value_1
---------------------------------------------------------------------
@ -1033,7 +1034,8 @@ SELECT
FROM
reference_table_test, colocated_table_test
WHERE
colocated_table_test.value_2 = reference_table_test.value_2;
colocated_table_test.value_2 = reference_table_test.value_2
ORDER BY 1;
LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ]
value_2
---------------------------------------------------------------------
@ -1046,7 +1048,8 @@ SELECT
FROM
colocated_table_test, reference_table_test
WHERE
reference_table_test.value_1 = colocated_table_test.value_1;
reference_table_test.value_1 = colocated_table_test.value_1
ORDER BY 1;
LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ]
value_2
---------------------------------------------------------------------
@ -1150,6 +1153,7 @@ FROM
colocated_table_test_2, reference_table_test
WHERE
colocated_table_test_2.value_4 = reference_table_test.value_4
ORDER BY 1
RETURNING value_1, value_2;
value_1 | value_2
---------------------------------------------------------------------
@ -674,7 +674,7 @@ UPDATE users_test_table
SET value_2 = 5
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id * random();
ERROR: functions used in the WHERE/ON/WHEN clause of modification queries on distributed tables must not be VOLATILE
ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE
UPDATE users_test_table
SET value_2 = 5 * random()
FROM events_test_table
@ -315,7 +315,7 @@ SELECT create_reference_table('tbl2');
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
ERROR: MERGE command is not supported on reference tables yet
ERROR: MERGE command is not supported on distributed/reference tables yet
-- now, both are reference, still not supported
SELECT create_reference_table('tbl1');
create_reference_table
@ -325,7 +325,7 @@ SELECT create_reference_table('tbl1');
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
ERROR: MERGE command is not supported on reference tables yet
ERROR: MERGE command is not supported on distributed/reference tables yet
-- now, both distributed, not works
SELECT undistribute_table('tbl1');
NOTICE: creating a new table for pg15.tbl1
@ -419,14 +419,14 @@ SELECT create_distributed_table('tbl2', 'x');
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
ERROR: MERGE command is only supported when distributed tables are joined on their distribution column
ERROR: MERGE command is not supported on distributed/reference tables yet
-- also, not inside subqueries & ctes
WITH targq AS (
SELECT * FROM tbl2
)
MERGE INTO tbl1 USING targq ON (true)
WHEN MATCHED THEN DELETE;
ERROR: MERGE command is only supported when distributed tables are joined on their distribution column
ERROR: MERGE command is not supported on distributed/reference tables yet
-- crashes on beta3, fixed on 15 stable
--WITH foo AS (
-- MERGE INTO tbl1 USING tbl2 ON (true)
@ -441,7 +441,7 @@ USING tbl2
ON (true)
WHEN MATCHED THEN
UPDATE SET x = (SELECT count(*) FROM tbl2);
ERROR: MERGE command is only supported when distributed tables are joined on their distribution column
ERROR: MERGE command is not supported on distributed/reference tables yet
-- test numeric types with negative scale
CREATE TABLE numeric_negative_scale(numeric_column numeric(3,-1), orig_value int);
INSERT into numeric_negative_scale SELECT x,x FROM generate_series(111, 115) x;
@ -910,15 +910,7 @@ MERGE INTO wq_target t
USING wq_source s ON t.tid = s.sid
WHEN MATCHED AND (merge_when_and_write()) THEN
UPDATE SET balance = t.balance + s.balance;
ERROR: functions used in the WHERE/ON/WHEN clause of modification queries on distributed tables must not be VOLATILE
ERROR: functions used in UPDATE queries on distributed tables must not be VOLATILE
ROLLBACK;
-- Test preventing ON condition from writing to the database
BEGIN;
MERGE INTO wq_target t
USING wq_source s ON t.tid = s.sid AND (merge_when_and_write())
WHEN MATCHED THEN
UPDATE SET balance = t.balance + s.balance;
ERROR: functions used in the WHERE/ON/WHEN clause of modification queries on distributed tables must not be VOLATILE
ROLLBACK;
drop function merge_when_and_write();
DROP TABLE wq_target, wq_source;
@ -503,3 +503,82 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM
---------------------------------------------------------------------
(0 rows)
CREATE TABLE dist(id int, value int);
SELECT create_distributed_table('dist','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO dist SELECT i, i FROM generate_series(0,100) i;
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
CREATE TABLE ref(id int);
SELECT create_reference_table('ref');
create_reference_table
---------------------------------------------------------------------
(1 row)
INSERT INTO ref SELECT i FROM generate_series(50,150) i;
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
CREATE TABLE local(id int);
INSERT INTO local SELECT i FROM generate_series(50,150) i;
-- the planner recursively plans the local table in the local-distributed join, and then the whole query is routed
SELECT COUNT(*) FROM dist JOIN local USING(id)
WHERE
dist.id IN (SELECT id FROM dist WHERE id = 55) AND
dist.id = 55 AND
dist.value IN (SELECT value FROM dist WHERE id = 55);
DEBUG: Wrapping relation "local" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT id FROM public.local WHERE (id OPERATOR(pg_catalog.=) 55)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (public.dist JOIN (SELECT local_1.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) local_1) local USING (id)) WHERE ((dist.id OPERATOR(pg_catalog.=) ANY (SELECT dist_1.id FROM public.dist dist_1 WHERE (dist_1.id OPERATOR(pg_catalog.=) 55))) AND (dist.id OPERATOR(pg_catalog.=) 55) AND (dist.value OPERATOR(pg_catalog.=) ANY (SELECT dist_1.value FROM public.dist dist_1 WHERE (dist_1.id OPERATOR(pg_catalog.=) 55))))
count
---------------------------------------------------------------------
1
(1 row)
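-- Editor's sketch, not part of the original test: which side gets wrapped is governed by
-- citus.local_table_join_policy; with 'prefer-distributed' the distributed side is pulled to the
-- coordinator instead of the local one.
SET citus.local_table_join_policy TO 'prefer-distributed';
SELECT COUNT(*) FROM dist JOIN local USING (id);
RESET citus.local_table_join_policy;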
-- the subquery in the WHERE clause should be recursively planned after the planner recursively plans the recurring full join
SELECT COUNT(*) FROM ref FULL JOIN dist USING (id)
WHERE
dist.id IN (SELECT id FROM dist GROUP BY id);
DEBUG: recursively planning right side of the full join since the other side is a recurring rel
DEBUG: recursively planning distributed relation "dist" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "dist" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT id FROM public.dist WHERE true
DEBUG: generating subplan XXX_2 for subquery SELECT id FROM public.dist GROUP BY id
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (public.ref FULL JOIN (SELECT dist_1.id, NULL::integer AS value FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) dist_1) dist USING (id)) WHERE (dist.id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)))
count
---------------------------------------------------------------------
101
(1 row)
-- the subqueries in the WHERE clause should be recursively planned after the planner recursively plans the full outer join
SELECT COUNT(*) FROM dist FULL JOIN ref USING(id)
WHERE
dist.id IN (SELECT id FROM dist WHERE id > 5) AND
dist.value IN (SELECT value FROM dist WHERE id > 15);
DEBUG: generating subplan XXX_1 for subquery SELECT value FROM public.dist WHERE (id OPERATOR(pg_catalog.>) 15)
DEBUG: recursively planning left side of the full join since the other side is a recurring rel
DEBUG: recursively planning distributed relation "dist" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "dist" to a subquery
DEBUG: generating subplan XXX_2 for subquery SELECT id, value FROM public.dist WHERE true
DEBUG: generating subplan XXX_3 for subquery SELECT id FROM public.dist WHERE (id OPERATOR(pg_catalog.>) 5)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT dist_1.id, dist_1.value FROM (SELECT intermediate_result.id, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value integer)) dist_1) dist FULL JOIN public.ref USING (id)) WHERE ((dist.id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(id integer))) AND (dist.value OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value integer))))
count
---------------------------------------------------------------------
85
(1 row)
-- sublinks in the targetlist are not supported
SELECT (SELECT id FROM dist WHERE dist.id > d1.id GROUP BY id) FROM ref FULL JOIN dist d1 USING (id);
DEBUG: recursively planning right side of the full join since the other side is a recurring rel
DEBUG: recursively planning distributed relation "dist" "d1" since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: Wrapping relation "dist" "d1" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT id FROM public.dist d1 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (SELECT dist.id FROM public.dist WHERE (dist.id OPERATOR(pg_catalog.>) d1.id) GROUP BY dist.id) AS id FROM (public.ref FULL JOIN (SELECT d1_1.id, NULL::integer AS value FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) d1_1) d1 USING (id))
ERROR: correlated subqueries are not supported when the FROM clause contains a reference table
DROP TABLE dist;
DROP TABLE ref;
DROP TABLE local;
@ -371,3 +371,6 @@ select count(DISTINCT value) from text_data;
11
(1 row)
-- test using a columnar partition
CREATE TABLE foo (d DATE NOT NULL) PARTITION BY RANGE (d);
CREATE TABLE foo3 PARTITION OF foo FOR VALUES FROM ('2009-02-01') TO ('2009-03-01') USING COLUMNAR;
@ -54,7 +54,7 @@ test: subqueries_deep subquery_view subquery_partitioning subqueries_not_support
test: subquery_in_targetlist subquery_in_where subquery_complex_target_list subquery_append
test: subquery_prepared_statements
test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins
test: cte_inline recursive_view_local_table values sequences_with_different_types
test: cte_inline recursive_view_local_table values sequences_with_different_types multi_level_recursive_queries
test: pg13 pg12
# run pg14 sequentially as it syncs metadata
test: pg14
@ -95,7 +95,7 @@ test: multi_dropped_column_aliases foreign_key_restriction_enforcement
test: binary_protocol
test: alter_table_set_access_method
test: alter_distributed_table
test: issue_5248 issue_5099 issue_5763
test: issue_5248 issue_5099 issue_5763 issue_6543
test: object_propagation_debug
test: undistribute_table
test: run_command_on_all_nodes
@ -121,3 +121,4 @@ test: ensure_no_shared_connection_leak
test: check_mx
test: generated_identity
test: drop_database
@ -561,7 +561,7 @@ if($isolationtester)
# maintenance daemon.
push(@pgOptions, "citus.distributed_deadlock_detection_factor=-1");
push(@pgOptions, "citus.recover_2pc_interval=-1");
push(@pgOptions, "citus.enable_statistics_collection=-1");
push(@pgOptions, "citus.enable_statistics_collection=false");
push(@pgOptions, "citus.defer_shard_delete_interval=-1");
push(@pgOptions, "citus.stat_statements_purge_interval=-1");
push(@pgOptions, "citus.background_task_queue_interval=-1");
@ -0,0 +1,72 @@
#include "isolation_mx_common.include.spec"
setup
{
SET citus.shard_count to 1;
SET citus.shard_replication_factor to 1;
CREATE TABLE t1 (id int PRIMARY KEY, value int);
SELECT create_distributed_table('t1', 'id');
CREATE TABLE t2 (id int PRIMARY KEY, value int);
SELECT create_distributed_table('t2', 'id');
CREATE TABLE r (id int PRIMARY KEY, value int);
SELECT create_reference_table('r');
SELECT get_shard_id_for_distribution_column('t1', 5) INTO selected_shard_for_test_table;
}
setup {
ALTER TABLE t1 ADD CONSTRAINT t1_t2_fkey FOREIGN KEY (id) REFERENCES t2(id);
}
setup {
ALTER TABLE t1 ADD CONSTRAINT t1_r_fkey FOREIGN KEY (value) REFERENCES r(id);
}
teardown
{
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE r;
DROP TABLE selected_shard_for_test_table;
}
session "s1"
step "s1-start-session-level-connection"
{
SELECT start_session_level_connection_to_node('localhost', 57638);
}
// This inserts a foreign key violation directly into the shard on the target
// worker. Since we intentionally skip validating the foreign key on the new
// shard, we expect no errors.
step "s1-insert-violation-into-shard"
{
SELECT run_commands_on_session_level_connection_to_node(format('INSERT INTO t1_%s VALUES (-1, -1)', (SELECT * FROM selected_shard_for_test_table)));
}
session "s2"
step "s2-move-placement"
{
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
}
session "s3"
// This advisory lock with (almost) random values is only used
// for testing purposes. For details, check Citus' logical replication
// source code.
step "s3-acquire-advisory-lock"
{
SELECT pg_advisory_lock(44000, 55152);
}
step "s3-release-advisory-lock"
{
SELECT pg_advisory_unlock(44000, 55152);
}
permutation "s1-start-session-level-connection" "s3-acquire-advisory-lock" "s2-move-placement" "s1-start-session-level-connection" "s1-insert-violation-into-shard" "s3-release-advisory-lock"
@ -155,6 +155,12 @@ step "s2-show-pg_dist_cleanup"
SELECT object_name, object_type, policy_type FROM pg_dist_cleanup;
}
step "s2-show-pg_dist_cleanup-shards"
{
SELECT object_name, object_type, policy_type FROM pg_dist_cleanup
WHERE object_type = 1;
}
step "s2-print-cluster" step "s2-print-cluster"
{ {
-- row count per shard -- row count per shard
@ -233,9 +239,9 @@ permutation "s2-insert" "s2-print-cluster" "s3-acquire-advisory-lock" "s1-begin"
// With Deferred drop, AccessShareLock (acquired by SELECTS) do not block split from completion.
permutation "s1-load-cache" "s1-start-connection" "s1-lock-to-split-shard" "s2-print-locks" "s2-non-blocking-shard-split" "s2-print-locks" "s2-show-pg_dist_cleanup" "s1-stop-connection"
permutation "s1-load-cache" "s1-start-connection" "s1-lock-to-split-shard" "s2-print-locks" "s2-non-blocking-shard-split" "s2-print-locks" "s2-show-pg_dist_cleanup-shards" "s1-stop-connection"
// The same test above without loading the cache at first
permutation "s1-start-connection" "s1-lock-to-split-shard" "s2-print-locks" "s2-non-blocking-shard-split" "s2-print-cluster" "s2-show-pg_dist_cleanup" "s1-stop-connection"
permutation "s1-start-connection" "s1-lock-to-split-shard" "s2-print-locks" "s2-non-blocking-shard-split" "s2-print-cluster" "s2-show-pg_dist_cleanup-shards" "s1-stop-connection"
// When a split operation is running, cleaner cannot clean its resources.
permutation "s1-load-cache" "s1-acquire-split-advisory-lock" "s2-non-blocking-shard-split" "s1-run-cleaner" "s1-show-pg_dist_cleanup" "s1-release-split-advisory-lock" "s1-run-cleaner" "s2-show-pg_dist_cleanup"
@ -61,7 +61,6 @@ SELECT citus_rebalance_wait();
DROP TABLE t1;
-- make sure a non-super user can stop rebalancing
CREATE USER non_super_user_rebalance WITH LOGIN;
GRANT ALL ON SCHEMA background_rebalance TO non_super_user_rebalance; GRANT ALL ON SCHEMA background_rebalance TO non_super_user_rebalance;
@ -77,6 +76,37 @@ SELECT citus_rebalance_stop();
RESET ROLE;
CREATE TABLE ref_no_pk(a int);
SELECT create_reference_table('ref_no_pk');
CREATE TABLE ref_with_pk(a int primary key);
SELECT create_reference_table('ref_with_pk');
-- Add coordinator so there's a node which doesn't have the reference tables
SELECT 1 FROM citus_add_node('localhost', :master_port, groupId=>0);
-- fails
BEGIN;
SELECT 1 FROM citus_rebalance_start();
ROLLBACK;
-- success
BEGIN;
SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'force_logical');
ROLLBACK;
-- success
BEGIN;
SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'block_writes');
ROLLBACK;
-- fails
SELECT 1 FROM citus_rebalance_start();
-- succeeds
SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'force_logical');
-- wait for success
SELECT citus_rebalance_wait();
SELECT state, details from citus_rebalance_status();
-- Remove coordinator again to allow rerunning of this test
SELECT 1 FROM citus_remove_node('localhost', :master_port);
SELECT public.wait_until_metadata_sync(30000);
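-- Editor's note, an assumption that this test does not exercise: the default-mode failures above
-- stem from ref_no_pk lacking a replica identity for logical replication, so giving it one should
-- be a third way to let the default shard_transfer_mode proceed.
ALTER TABLE ref_no_pk REPLICA IDENTITY FULL;
SELECT 1 FROM citus_rebalance_start();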
SET client_min_messages TO WARNING;
DROP SCHEMA background_rebalance CASCADE;
@ -250,6 +250,26 @@ JOIN
citus_local c2
USING (key);
-- The prefer-distributed option makes the recursive planner pass over the query twice and error out.
-- The planner recursively plans one of the distributed_table references in its first pass. Then, in its second
-- pass, it also recursively plans the other distributed_table reference, since the modification in the first pass made that necessary.
SET citus.local_table_join_policy TO 'prefer-distributed';
SELECT
COUNT(*)
FROM
postgres_table
JOIN
distributed_table
USING
(key)
JOIN
(SELECT key, NULL, NULL FROM distributed_table) foo
USING
(key);
RESET citus.local_table_join_policy;
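-- Editor's sketch for contrast (assuming the tables created earlier in this file): with the
-- default 'auto' policy only the local side is wrapped, so the same kind of join succeeds in a
-- single pass.
SELECT COUNT(*) FROM postgres_table JOIN distributed_table USING (key);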
SET client_min_messages to ERROR;
DROP TABLE citus_local;
@ -170,7 +170,7 @@ ROLLBACK;
CREATE FOREIGN TABLE foreign_table (
id bigint not null,
full_name text not null default ''
) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true');
) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true', table_name 'foreign_table');
-- observe that we do not create fdw server for shell table, both shard relation
-- & shell relation points to the same same server object
@ -2,7 +2,23 @@
-- Test the CREATE statements related to columnar.
--
-- We cannot create the tables below within columnar_create because columnar_create
-- is dropped at the end of this test, but unfortunately some other tests depend on
-- those tables too.
--
-- However, this file has to be runnable multiple times for flaky test detection;
-- so we create them below --outside columnar_create-- idempotently.
DO
$$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_class
WHERE relname = 'contestant' AND
relnamespace = (
SELECT oid FROM pg_namespace WHERE nspname = 'public'
)
)
THEN
-- Create uncompressed table
CREATE TABLE contestant (handle TEXT, birthdate DATE, rating INT,
percentile FLOAT, country CHAR(3), achievements TEXT[])
@ -18,8 +34,16 @@ CREATE TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT,
-- Test that querying an empty table works
ANALYZE contestant;
END IF;
END
$$
LANGUAGE plpgsql;
SELECT count(*) FROM contestant;
CREATE SCHEMA columnar_create;
SET search_path TO columnar_create;
-- Should fail: unlogged tables not supported
CREATE UNLOGGED TABLE columnar_unlogged(i int) USING columnar;
@ -48,6 +72,58 @@ ROLLBACK;
-- since we rollback'ed above xact, should return true
SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_storage_id);
BEGIN;
INSERT INTO columnar_table_1 VALUES (2);
ROLLBACK;
INSERT INTO columnar_table_1 VALUES (3),(4);
INSERT INTO columnar_table_1 VALUES (5),(6);
INSERT INTO columnar_table_1 VALUES (7),(8);
-- Test whether columnar metadata accessors are still fine even
-- when the metadata indexes are not available to them.
BEGIN;
ALTER INDEX columnar_internal.stripe_first_row_number_idx RENAME TO new_index_name;
ALTER INDEX columnar_internal.chunk_pkey RENAME TO new_index_name_1;
ALTER INDEX columnar_internal.stripe_pkey RENAME TO new_index_name_2;
ALTER INDEX columnar_internal.chunk_group_pkey RENAME TO new_index_name_3;
CREATE INDEX columnar_table_1_idx ON columnar_table_1(a);
-- make sure that we test index scan
SET LOCAL columnar.enable_custom_scan TO 'off';
SET LOCAL enable_seqscan TO off;
SET LOCAL seq_page_cost TO 10000000;
SELECT * FROM columnar_table_1 WHERE a = 6;
SELECT * FROM columnar_table_1 WHERE a = 5;
SELECT * FROM columnar_table_1 WHERE a = 7;
SELECT * FROM columnar_table_1 WHERE a = 3;
DROP INDEX columnar_table_1_idx;
-- Re-shuffle some metadata records to test whether we can
-- rely on sequential metadata scan when the metadata records
-- are not ordered by their "first_row_number"s.
WITH cte AS (
DELETE FROM columnar_internal.stripe
WHERE storage_id = columnar.get_storage_id('columnar_table_1')
RETURNING *
)
INSERT INTO columnar_internal.stripe SELECT * FROM cte ORDER BY first_row_number DESC;
SELECT SUM(a) FROM columnar_table_1;
SELECT * FROM columnar_table_1 WHERE a = 6;
-- Run a SELECT query after the INSERT command to force flushing the
-- data within the xact block.
INSERT INTO columnar_table_1 VALUES (20);
SELECT COUNT(*) FROM columnar_table_1;
DROP TABLE columnar_table_1 CASCADE;
ROLLBACK;
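-- Editor's sketch (assuming the stripe_num column present in the Citus 11.x columnar metadata):
-- the reshuffling exercised above can be observed by listing the stripe records directly.
SELECT storage_id, stripe_num, first_row_number
FROM columnar_internal.stripe
ORDER BY first_row_number;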
-- test dropping columnar table
DROP TABLE columnar_table_1 CASCADE;
SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_storage_id);
@ -66,6 +142,7 @@ FROM pg_class WHERE relname='columnar_temp' \gset
SELECT pg_backend_pid() AS val INTO old_backend_pid;
\c - - - :master_port
SET search_path TO columnar_create;
-- wait until old backend to expire to make sure that temp table cleanup is complete
SELECT columnar_test_helpers.pg_waitpid(val) FROM old_backend_pid;
@ -132,3 +209,6 @@ SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_temp_sto
-- make sure citus_columnar can be loaded
LOAD 'citus_columnar';
SET client_min_messages TO WARNING;
DROP SCHEMA columnar_create CASCADE;
@ -167,6 +167,16 @@ SELECT SUM(a)=312487500 FROM columnar_table WHERE a < 25000;
SELECT SUM(a)=167000 FROM columnar_table WHERE a = 16000 OR a = 151000;
SELECT SUM(a)=48000 FROM columnar_table WHERE a = 16000 OR a = 32000;
BEGIN;
ALTER INDEX columnar_internal.stripe_first_row_number_idx RENAME TO new_index_name;
ALTER INDEX columnar_internal.chunk_pkey RENAME TO new_index_name_1;
-- same queries but this time some metadata indexes are not available
SELECT SUM(a)=312487500 FROM columnar_table WHERE a < 25000;
SELECT SUM(a)=167000 FROM columnar_table WHERE a = 16000 OR a = 151000;
SELECT SUM(a)=48000 FROM columnar_table WHERE a = 16000 OR a = 32000;
ROLLBACK;
TRUNCATE columnar_table;
ALTER TABLE columnar_table DROP CONSTRAINT columnar_table_pkey;
@ -223,5 +223,13 @@ INSERT INTO reference_table VALUES ('(4)'), ('(5)');
SELECT * FROM reference_table ORDER BY 1;
-- failing UPDATE on a reference table with a subquery in RETURNING clause that needs to be pushed-down.
-- the error message is not great, but at least we no longer see crashes.
CREATE TABLE ref (a int);
SELECT create_reference_table('ref');
UPDATE ref SET a = 1 RETURNING
(SELECT pg_catalog.max(latest_end_time) FROM pg_catalog.pg_stat_wal_receiver)
as c3;
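-- Editor's sketch of an equivalent that sidesteps the pushdown problem (assuming it is acceptable
-- to evaluate the scalar subquery separately on the coordinator):
UPDATE ref SET a = 1;
SELECT pg_catalog.max(latest_end_time) AS c3 FROM pg_catalog.pg_stat_wal_receiver;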
SET client_min_messages TO ERROR;
DROP SCHEMA coordinator_evaluation CASCADE;
@ -0,0 +1,45 @@
-- coordinator
CREATE SCHEMA drop_database;
SET search_path TO drop_database;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 35137400;
CREATE DATABASE citus_created;
\c citus_created
CREATE EXTENSION citus;
CREATE DATABASE citus_not_created;
\c citus_not_created
DROP DATABASE citus_created;
\c regression
DROP DATABASE citus_not_created;
-- worker1
\c - - - :worker_1_port
SET search_path TO drop_database;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 35137400;
CREATE DATABASE citus_created;
\c citus_created
CREATE EXTENSION citus;
CREATE DATABASE citus_not_created;
\c citus_not_created
DROP DATABASE citus_created;
\c regression
DROP DATABASE citus_not_created;
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA drop_database CASCADE;
@ -7,6 +7,14 @@ SET citus.enable_local_execution TO ON;
CREATE SCHEMA foreign_tables_schema_mx;
SET search_path TO foreign_tables_schema_mx;
SET client_min_messages to ERROR;
-- ensure that coordinator is added to pg_dist_node
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
RESET client_min_messages;
-- test adding foreign table to metadata with the guc
SET citus.use_citus_managed_tables TO ON;
CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial);
@ -219,7 +227,6 @@ SELECT * FROM ref_tbl d JOIN foreign_table_local f ON d.a=f.id ORDER BY f.id;
SET search_path TO foreign_tables_schema_mx;
-- should error out because doesn't have a table_name field
CREATE FOREIGN TABLE foreign_table_local_fails (
id integer NOT NULL,
data text
@ -227,8 +234,75 @@ CREATE FOREIGN TABLE foreign_table_local_fails (
SERVER foreign_server_local
OPTIONS (schema_name 'foreign_tables_schema_mx');
-- should error out because doesn't have a table_name field
SELECT citus_add_local_table_to_metadata('foreign_table_local_fails');
-- should work since it has a table_name
ALTER FOREIGN TABLE foreign_table_local_fails OPTIONS (table_name 'foreign_table_test');
SELECT citus_add_local_table_to_metadata('foreign_table_local_fails');
INSERT INTO foreign_table_test VALUES (1, 'test');
SELECT undistribute_table('foreign_table_local_fails');
DROP FOREIGN TABLE foreign_table_local; DROP FOREIGN TABLE foreign_table_local;
-- disallow dropping table_name when foreign table is in metadata
CREATE TABLE table_name_drop(id int);
CREATE FOREIGN TABLE foreign_table_name_drop_fails (
id INT
)
SERVER foreign_server_local
OPTIONS (schema_name 'foreign_tables_schema_mx', table_name 'table_name_drop');
SELECT citus_add_local_table_to_metadata('foreign_table_name_drop_fails');
-- table_name option is already added
ALTER FOREIGN TABLE foreign_table_name_drop_fails OPTIONS (ADD table_name 'table_name_drop');
-- throw error if user tries to drop table_name option from a foreign table inside metadata
ALTER FOREIGN TABLE foreign_table_name_drop_fails OPTIONS (DROP table_name);
-- case sensitive option name
ALTER FOREIGN TABLE foreign_table_name_drop_fails OPTIONS (DROP Table_Name);
-- other options are allowed to drop
ALTER FOREIGN TABLE foreign_table_name_drop_fails OPTIONS (DROP schema_name);
CREATE FOREIGN TABLE foreign_table_name_drop (
id INT
)
SERVER foreign_server_local
OPTIONS (schema_name 'foreign_tables_schema_mx', table_name 'table_name_drop');
-- user can drop table_option if foreign table is not in metadata
ALTER FOREIGN TABLE foreign_table_name_drop OPTIONS (DROP table_name);
-- we should not intercept data wrappers other than postgres_fdw
CREATE EXTENSION file_fdw;
-- remove validator method to add table_name option; otherwise, table_name option is not allowed
SELECT result FROM run_command_on_all_nodes('ALTER FOREIGN DATA WRAPPER file_fdw NO VALIDATOR');
CREATE SERVER citustest FOREIGN DATA WRAPPER file_fdw;
\copy (select i from generate_series(0,100)i) to '/tmp/test_file_fdw.data';
CREATE FOREIGN TABLE citustest_filefdw (
data text
)
SERVER citustest
OPTIONS ( filename '/tmp/test_file_fdw.data');
-- add non-postgres_fdw table into metadata even if it does not have table_name option
SELECT citus_add_local_table_to_metadata('citustest_filefdw');
ALTER FOREIGN TABLE citustest_filefdw OPTIONS (ADD table_name 'unused_table_name_option');
-- drop table_name option of non-postgres_fdw table even if it is inside metadata
ALTER FOREIGN TABLE citustest_filefdw OPTIONS (DROP table_name);
-- cleanup at exit
set client_min_messages to error;
DROP SCHEMA foreign_tables_schema_mx CASCADE;
@ -0,0 +1,24 @@
CREATE SCHEMA issue_6543;
SET search_path TO issue_6543;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 67322500;
CREATE TABLE event (
tenant_id varchar,
id bigint,
primary key (tenant_id, id)
);
CREATE TABLE page (
tenant_id varchar,
id int,
primary key (tenant_id, id)
);
SELECT create_distributed_table('event', 'tenant_id');
SELECT create_distributed_table('page', 'tenant_id', colocate_with => 'event');
alter table page add constraint fk21 foreign key (tenant_id, id) references event;
SET client_min_messages TO WARNING;
DROP SCHEMA issue_6543 CASCADE;
@ -289,18 +289,6 @@ ORDER BY 1;
SELECT local.title, local.title FROM local JOIN distributed USING(id) ORDER BY 1,2 LIMIt 1;
SELECT NULL FROM local JOIN distributed USING(id) ORDER BY 1 LIMIt 1;
SELECT distributed.name, distributed.name, local.title, local.title FROM local JOIN distributed USING(id) ORDER BY 1,2,3,4 LIMIT 1;
SELECT
COUNT(*)
FROM
local
JOIN
distributed
USING
(id)
JOIN
(SELECT id, NULL, NULL FROM distributed) foo
USING
(id);
BEGIN;
SELECT COUNT(DISTINCT title) FROM local;
@ -19,7 +19,6 @@ SET search_path TO merge_schema;
SET citus.shard_count TO 4;
SET citus.next_shard_id TO 4000000;
SET citus.explain_all_tasks to true;
SET citus.shard_replication_factor TO 1;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
CREATE TABLE source
@ -144,13 +143,9 @@ SELECT undistribute_table('source');
SELECT create_distributed_table('target', 'customer_id');
SELECT create_distributed_table('source', 'customer_id');
-- Updates one of the row with customer_id = 30002
SELECT * from target t WHERE t.customer_id = 30002;
-- Turn on notice to print tasks sent to nodes (it should be a single task)
SET citus.log_remote_commands to true;
MERGE INTO target t
USING source s
ON (t.customer_id = s.customer_id) AND t.customer_id = 30002
ON (t.customer_id = s.customer_id)
WHEN MATCHED AND t.order_center = 'XX' THEN
DELETE
@ -163,27 +158,6 @@ MERGE INTO target t
WHEN NOT MATCHED THEN -- New entry, record it.
INSERT (customer_id, last_order_id, order_center, order_count, last_order)
VALUES (customer_id, s.order_id, s.order_center, 123, s.order_time);
SET citus.log_remote_commands to false;
SELECT * from target t WHERE t.customer_id = 30002;
-- Deletes one of the row with customer_id = 30004
SELECT * from target t WHERE t.customer_id = 30004;
MERGE INTO target t
USING source s
ON (t.customer_id = s.customer_id) AND t.customer_id = 30004
WHEN MATCHED AND t.order_center = 'XX' THEN
DELETE
WHEN MATCHED THEN
UPDATE SET -- Existing customer, update the order count and last_order_id
order_count = t.order_count + 1,
last_order_id = s.order_id
WHEN NOT MATCHED THEN -- New entry, record it.
INSERT (customer_id, last_order_id, order_center, order_count, last_order)
VALUES (customer_id, s.order_id, s.order_center, 123, s.order_time);
SELECT * from target t WHERE t.customer_id = 30004;
--
-- Test MERGE with CTE as source
@ -269,13 +243,11 @@ SELECT create_distributed_table('t1', 'id');
SELECT create_distributed_table('s1', 'id');
SELECT * FROM t1 order by id;
SET citus.log_remote_commands to true;
WITH s1_res AS (
SELECT * FROM s1
)
MERGE INTO t1
USING s1_res ON (s1_res.id = t1.id) AND t1.id = 6 USING s1_res ON (s1_res.id = t1.id)
WHEN MATCHED AND s1_res.val = 0 THEN
DELETE
@ -283,9 +255,6 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1_res.id, s1_res.val);
SET citus.log_remote_commands to false;
-- As id 6 is NOT a match, VALUES(6, 1) should appear in the target
SELECT * FROM t1 order by id;
--
-- Test with multiple join conditions
@ -356,21 +325,15 @@ SELECT undistribute_table('s2');
SELECT create_distributed_table('t2', 'id');
SELECT create_distributed_table('s2', 'id');
SELECT * FROM t2 ORDER BY 1;
SET citus.log_remote_commands to true;
MERGE INTO t2
USING s2
ON t2.id = s2.id AND t2.src = s2.src AND t2.id = 4 ON t2.id = s2.id AND t2.src = s2.src
WHEN MATCHED AND t2.val = 1 THEN
UPDATE SET val = s2.val + 10
WHEN MATCHED THEN
DELETE
WHEN NOT MATCHED THEN
INSERT (id, val, src) VALUES (s2.id, s2.val, s2.src);
SET citus.log_remote_commands to false;
-- The row with id = 4 matches the DELETE clause, so it should be deleted
-- The row with id = 3 has no match, so the row from the source will be inserted
SELECT * FROM t2 ORDER BY 1;
--
-- With sub-query as the MERGE source
@ -861,159 +824,10 @@ RESET client_min_messages;
SELECT * FROM ft_target;
--
-- complex joins on the source side
--
-- the source relation (a join of two relations) is an unaliased join
CREATE TABLE target_cj(tid int, src text, val int);
CREATE TABLE source_cj1(sid1 int, src1 text, val1 int);
CREATE TABLE source_cj2(sid2 int, src2 text, val2 int);
INSERT INTO target_cj VALUES (1, 'target', 0);
INSERT INTO target_cj VALUES (2, 'target', 0);
INSERT INTO target_cj VALUES (2, 'target', 0);
INSERT INTO target_cj VALUES (3, 'target', 0);
INSERT INTO source_cj1 VALUES (2, 'source-1', 10);
INSERT INTO source_cj2 VALUES (2, 'source-2', 20);
BEGIN;
MERGE INTO target_cj t
USING source_cj1 s1 INNER JOIN source_cj2 s2 ON sid1 = sid2
ON t.tid = sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET src = src2
WHEN NOT MATCHED THEN
DO NOTHING;
-- Gold result to compare against
SELECT * FROM target_cj ORDER BY 1;
ROLLBACK;
BEGIN;
-- try accessing columns from either side of the source join
MERGE INTO target_cj t
USING source_cj1 s2
INNER JOIN source_cj2 s1 ON sid1 = sid2 AND val1 = 10
ON t.tid = sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET tid = sid2, src = src1, val = val2
WHEN NOT MATCHED THEN
DO NOTHING;
-- Gold result to compare against
SELECT * FROM target_cj ORDER BY 1;
ROLLBACK;
-- Test the same scenarios with distributed tables
SELECT create_distributed_table('target_cj', 'tid');
SELECT create_distributed_table('source_cj1', 'sid1');
SELECT create_distributed_table('source_cj2', 'sid2');
BEGIN;
SET citus.log_remote_commands to true;
MERGE INTO target_cj t
USING source_cj1 s1 INNER JOIN source_cj2 s2 ON sid1 = sid2
ON t.tid = sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET src = src2
WHEN NOT MATCHED THEN
DO NOTHING;
SET citus.log_remote_commands to false;
SELECT * FROM target_cj ORDER BY 1;
ROLLBACK;
BEGIN;
-- try accessing columns from either side of the source join
MERGE INTO target_cj t
USING source_cj1 s2
INNER JOIN source_cj2 s1 ON sid1 = sid2 AND val1 = 10
ON t.tid = sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET src = src1, val = val2
WHEN NOT MATCHED THEN
DO NOTHING;
SELECT * FROM target_cj ORDER BY 1;
ROLLBACK;
-- sub-query as a source
BEGIN;
MERGE INTO target_cj t
USING (SELECT * FROM source_cj1 WHERE sid1 = 2) sub
ON t.tid = sub.sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET src = sub.src1, val = val1
WHEN NOT MATCHED THEN
DO NOTHING;
SELECT * FROM target_cj ORDER BY 1;
ROLLBACK;
-- Test self-join
BEGIN;
SELECT * FROM target_cj ORDER BY 1;
set citus.log_remote_commands to true;
MERGE INTO target_cj t1
USING (SELECT * FROM target_cj) sub
ON t1.tid = sub.tid AND t1.tid = 3
WHEN MATCHED THEN
UPDATE SET src = sub.src, val = sub.val + 100
WHEN NOT MATCHED THEN
DO NOTHING;
set citus.log_remote_commands to false;
SELECT * FROM target_cj ORDER BY 1;
ROLLBACK;
-- Test PREPARE
PREPARE foo(int) AS
MERGE INTO target_cj target
USING (SELECT * FROM source_cj1) sub
ON target.tid = sub.sid1 AND target.tid = $1
WHEN MATCHED THEN
UPDATE SET val = sub.val1
WHEN NOT MATCHED THEN
DO NOTHING;
SELECT * FROM target_cj ORDER BY 1;
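-- Note added for clarity: with the default plan_cache_mode, PostgreSQL considers
-- switching a prepared statement to a generic plan after five executions, which is
-- why the block below repeats EXECUTE foo(2) five times before re-checking the result.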
BEGIN;
EXECUTE foo(2);
EXECUTE foo(2);
EXECUTE foo(2);
EXECUTE foo(2);
EXECUTE foo(2);
SELECT * FROM target_cj ORDER BY 1;
ROLLBACK;
BEGIN;
SET citus.log_remote_commands to true;
SET client_min_messages TO DEBUG1;
EXECUTE foo(2);
RESET client_min_messages;
EXECUTE foo(2);
SET citus.log_remote_commands to false;
SELECT * FROM target_cj ORDER BY 1;
ROLLBACK;
--
-- Error and Unsupported scenarios
--
-- try updating the distribution key column
BEGIN;
MERGE INTO target_cj t
USING source_cj1 s
ON t.tid = s.sid1 AND t.tid = 2
WHEN MATCHED THEN
UPDATE SET tid = tid + 9, src = src || ' updated by merge'
WHEN NOT MATCHED THEN
INSERT VALUES (sid1, 'inserted by merge', val1);
ROLLBACK;
-- Foreign table as target
MERGE INTO foreign_table
USING ft_target ON (foreign_table.id = ft_target.id)
@ -1040,38 +854,6 @@ MERGE INTO t1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1.id, s1.val);
-- Now both s1 and t1 are distributed tables
SELECT undistribute_table('t1');
SELECT create_distributed_table('t1', 'id');
-- There is a potential pitfall: a function invoked in the MERGE conditions
-- can insert into or update a random shard
CREATE OR REPLACE function merge_when_and_write() RETURNS BOOLEAN
LANGUAGE PLPGSQL AS
$$
BEGIN
INSERT INTO t1 VALUES (100, 100);
RETURN TRUE;
END;
$$;
-- Test preventing "ON" join condition from writing to the database
BEGIN;
MERGE INTO t1
USING s1 ON t1.id = s1.id AND t1.id = 2 AND (merge_when_and_write())
WHEN MATCHED THEN
UPDATE SET val = t1.val + s1.val;
ROLLBACK;
-- Test preventing WHEN clause(s) from writing to the database
BEGIN;
MERGE INTO t1
USING s1 ON t1.id = s1.id AND t1.id = 2
WHEN MATCHED AND (merge_when_and_write()) THEN
UPDATE SET val = t1.val + s1.val;
ROLLBACK;
-- Joining on partition columns with sub-query
MERGE INTO t1
USING (SELECT * FROM s1) sub ON (sub.val = t1.id) -- sub.val is not a distribution column
@ -1215,132 +997,6 @@ WHEN MATCHED THEN
WHEN NOT MATCHED THEN
INSERT VALUES(mv_source.id, mv_source.val);
-- Distributed tables *must* be colocated
CREATE TABLE dist_target(id int, val varchar);
SELECT create_distributed_table('dist_target', 'id');
CREATE TABLE dist_source(id int, val varchar);
SELECT create_distributed_table('dist_source', 'id', colocate_with => 'none');
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
-- Distributed tables *must* be joined on distribution column
CREATE TABLE dist_colocated(id int, val int);
SELECT create_distributed_table('dist_colocated', 'id', colocate_with => 'dist_target');
MERGE INTO dist_target
USING dist_colocated
ON dist_target.id = dist_colocated.val -- val is not the distribution column
WHEN MATCHED THEN
UPDATE SET val = dist_colocated.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_colocated.id, dist_colocated.val);
-- MERGE command must be joined with a constant qual on the target relation
-- AND clause is missing
MERGE INTO dist_target
USING dist_colocated
ON dist_target.id = dist_colocated.id
WHEN MATCHED THEN
UPDATE SET val = dist_colocated.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_colocated.id, dist_colocated.val);
-- AND clause incorrect table (must be target)
MERGE INTO dist_target
USING dist_colocated
ON dist_target.id = dist_colocated.id AND dist_colocated.id = 1
WHEN MATCHED THEN
UPDATE SET val = dist_colocated.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_colocated.id, dist_colocated.val);
-- AND clause incorrect column (must be distribution column)
MERGE INTO dist_target
USING dist_colocated
ON dist_target.id = dist_colocated.id AND dist_target.val = 'const'
WHEN MATCHED THEN
UPDATE SET val = dist_colocated.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_colocated.id, dist_colocated.val);
-- Both the source and target must be distributed
MERGE INTO dist_target
USING (SELECT 100 id) AS source
ON dist_target.id = source.id AND dist_target.val = 'const'
WHEN MATCHED THEN
UPDATE SET val = 'source'
WHEN NOT MATCHED THEN
INSERT VALUES(source.id, 'source');
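-- A hedged counterexample (added for illustration, not part of the original test):
-- a MERGE that satisfies the requirements above, i.e. colocated tables joined on the
-- distribution column with a constant qual on the target's distribution column,
-- is expected to be routable to a single shard.
MERGE INTO dist_target
USING dist_colocated
ON dist_target.id = dist_colocated.id AND dist_target.id = 1
WHEN MATCHED THEN
UPDATE SET val = dist_colocated.val::varchar
WHEN NOT MATCHED THEN
INSERT VALUES(dist_colocated.id, dist_colocated.val::varchar);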
-- Non-hash distributed tables (append/range).
CREATE VIEW show_tables AS
SELECT logicalrelid, partmethod
FROM pg_dist_partition
WHERE (logicalrelid = 'dist_target'::regclass) OR (logicalrelid = 'dist_source'::regclass)
ORDER BY 1;
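-- Note added for clarity: in pg_dist_partition, partmethod is 'h' for hash,
-- 'a' for append, 'r' for range, and 'n' for none (e.g. reference tables).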
SELECT undistribute_table('dist_source');
SELECT create_distributed_table('dist_source', 'id', 'append');
SELECT * FROM show_tables;
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
SELECT undistribute_table('dist_source');
SELECT create_distributed_table('dist_source', 'id', 'range');
SELECT * FROM show_tables;
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
-- Both are append tables
SELECT undistribute_table('dist_target');
SELECT undistribute_table('dist_source');
SELECT create_distributed_table('dist_target', 'id', 'append');
SELECT create_distributed_table('dist_source', 'id', 'append');
SELECT * FROM show_tables;
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
-- Both are range tables
SELECT undistribute_table('dist_target');
SELECT undistribute_table('dist_source');
SELECT create_distributed_table('dist_target', 'id', 'range');
SELECT create_distributed_table('dist_source', 'id', 'range');
SELECT * FROM show_tables;
MERGE INTO dist_target
USING dist_source
ON dist_target.id = dist_source.id
WHEN MATCHED THEN
UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
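-- A hypothetical fix sketch (added for illustration, not part of the original test):
-- the MERGE support exercised here expects hash-distributed, colocated tables, so
-- re-distributing both relations with the default hash method should make the
-- command plannable again.
SELECT undistribute_table('dist_target');
SELECT undistribute_table('dist_source');
SELECT create_distributed_table('dist_target', 'id');
SELECT create_distributed_table('dist_source', 'id', colocate_with => 'dist_target');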
DROP SERVER foreign_server CASCADE;
DROP FUNCTION merge_when_and_write();
DROP SCHEMA merge_schema CASCADE;
SELECT 1 FROM master_remove_node('localhost', :master_port);

View File

@ -36,6 +36,48 @@ SELECT con.conname
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_pkey;
-- Check "ADD PRIMARY KEY USING INDEX ..."
CREATE TABLE AT_AddConstNoName.tbl(col1 int, col2 int);
SELECT create_distributed_table('AT_AddConstNoName.tbl', 'col1');
CREATE UNIQUE INDEX my_index ON AT_AddConstNoName.tbl(col1);
ALTER TABLE AT_AddConstNoName.tbl ADD PRIMARY KEY USING INDEX my_index;
SELECT con.conname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'tbl';
\c - - :public_worker_1_host :worker_1_port
SELECT con.conname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname LIKE 'tbl%' ORDER BY con.conname ASC;
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName.tbl DROP CONSTRAINT my_index;
-- Check "ADD UNIQUE USING INDEX ..."
CREATE UNIQUE INDEX my_index ON AT_AddConstNoName.tbl(col1);
ALTER TABLE AT_AddConstNoName.tbl ADD UNIQUE USING INDEX my_index;
SELECT con.conname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'tbl';
\c - - :public_worker_1_host :worker_1_port
SELECT con.conname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname LIKE 'tbl%' ORDER BY con.conname ASC;
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName.tbl DROP CONSTRAINT my_index;
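-- Note added for clarity: when a constraint is added with USING INDEX, the constraint
-- adopts the index's name, which is why the statement above drops CONSTRAINT my_index.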
-- Check "ADD PRIMARY KEY DEFERRABLE" -- Check "ADD PRIMARY KEY DEFERRABLE"
ALTER TABLE AT_AddConstNoName.products ADD PRIMARY KEY(product_no) DEFERRABLE; ALTER TABLE AT_AddConstNoName.products ADD PRIMARY KEY(product_no) DEFERRABLE;
@ -212,7 +254,26 @@ SELECT con.conname, con.connoinherit
WHERE rel.relname = 'products';
\c - - :public_worker_1_host :worker_1_port
SELECT con.conname, connoinherit SELECT con.conname, con.connoinherit
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'products_5410000';
\c - - :master_host :master_port
ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check;
-- Check "ADD CHECK ... NOT VALID"
ALTER TABLE AT_AddConstNoName.products ADD CHECK (product_no > 0 AND price > 0) NOT VALID;
SELECT con.conname, con.convalidated
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'products';
\c - - :public_worker_1_host :worker_1_port
SELECT con.conname, con.convalidated
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace

View File

@ -154,6 +154,25 @@ SET SEARCH_PATH = at_add_fk;
ALTER TABLE referencing_table DROP CONSTRAINT referencing_table_ref_id_fkey;
-- test NOT VALID
ALTER TABLE referencing_table ADD FOREIGN KEY(ref_id) REFERENCES referenced_table(id) NOT VALID;
SELECT con.conname, con.convalidated
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname = 'referencing_table';
\c - - :public_worker_1_host :worker_1_port
SELECT con.conname, con.convalidated
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
WHERE rel.relname LIKE 'referencing_table%' ORDER BY con.conname ASC;
\c - - :master_host :master_port
SET SEARCH_PATH = at_add_fk;
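-- A hedged illustration (added, not part of the original test): as standard
-- PostgreSQL DDL, a NOT VALID foreign key can later be checked against existing
-- rows, flipping convalidated to true.
ALTER TABLE referencing_table VALIDATE CONSTRAINT referencing_table_ref_id_fkey;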
ALTER TABLE referencing_table DROP CONSTRAINT referencing_table_ref_id_fkey;
-- test ON DELETE NO ACTION + DEFERRABLE + INITIALLY DEFERRED
ALTER TABLE referencing_table ADD FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED;

View File

@ -331,6 +331,10 @@ SELECT 1 FROM columnar_table; -- seq scan
CREATE TABLE new_columnar_table (a int) USING columnar;
-- disable version checks for other sessions too
ALTER SYSTEM SET citus.enable_version_checks TO OFF;
SELECT pg_reload_conf();
-- do cleanup for the rest of the tests
SET citus.enable_version_checks TO OFF;
SET columnar.enable_version_checks TO OFF;
@ -563,6 +567,16 @@ RESET client_min_messages;
SELECT * FROM multi_extension.print_extension_changes();
-- Test downgrade to 11.2-1 from 11.3-1
ALTER EXTENSION citus UPDATE TO '11.3-1';
ALTER EXTENSION citus UPDATE TO '11.2-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
-- Snapshot of state at 11.3-1
ALTER EXTENSION citus UPDATE TO '11.3-1';
SELECT * FROM multi_extension.print_extension_changes();
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
@ -582,6 +596,11 @@ ORDER BY 1, 2;
-- see incompatible version errors out
RESET citus.enable_version_checks;
RESET columnar.enable_version_checks;
-- reset version check config for other sessions too
ALTER SYSTEM RESET citus.enable_version_checks;
SELECT pg_reload_conf();
DROP EXTENSION citus;
DROP EXTENSION citus_columnar;
CREATE EXTENSION citus VERSION '8.0-1';

View File

@ -0,0 +1,174 @@
-- multi-pass recursive queries with joins, subqueries, and CTEs
CREATE SCHEMA multi_recursive;
SET search_path TO multi_recursive;
DROP TABLE IF EXISTS tbl_dist1;
CREATE TABLE tbl_dist1(id int);
SELECT create_distributed_table('tbl_dist1','id');
DROP TABLE IF EXISTS tbl_ref1;
CREATE TABLE tbl_ref1(id int);
SELECT create_reference_table('tbl_ref1');
INSERT INTO tbl_dist1 SELECT i FROM generate_series(0,10) i;
INSERT INTO tbl_ref1 SELECT i FROM generate_series(0,10) i;
-- https://github.com/citusdata/citus/issues/6653
-- The reasons the inlined queries fail are all the same: after the query is modified in the
-- first pass, the second pass finds noncolocated queries, since we do not create equivalences
-- between nondistributed and distributed tables.
-- QUERY1
-- The recursive planner takes multiple passes over the query and fails.
-- Why does the inlined query fail?
-- The LIMIT clause is recursively planned inside the inlined subquery; the first pass finishes
-- here. In the second pass, noncolocated queries and the recurring full join are recursively
-- planned. We detect that and throw an error.
SELECT t1.id
FROM (
SELECT t2.id
FROM (
SELECT t0.id
FROM tbl_dist1 t0
LIMIT 5
) AS t2
INNER JOIN tbl_dist1 AS t3 USING (id)
) AS t1
FULL JOIN tbl_dist1 t4 USING (id);
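-- A hedged workaround sketch (added for illustration, not part of the original test):
-- materializing the problematic subquery as a CTE forces it to be recursively planned
-- up front, matching the non-inlined path that the notes below describe as succeeding.
WITH t2 AS MATERIALIZED (
SELECT t0.id FROM tbl_dist1 t0 LIMIT 5
)
SELECT t1.id
FROM (
SELECT t2.id FROM t2 INNER JOIN tbl_dist1 AS t3 USING (id)
) AS t1
FULL JOIN tbl_dist1 t4 USING (id);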
-- QUERY2
-- The recursive planner takes multiple passes over the query with the inlined CTE and fails.
-- The CTE is then planned without inlining, and the query succeeds.
-- Why does the inlined query fail?
-- The recurring left join is recursively planned in the inlined CTE, and the LIMIT clause then
-- causes another round of recursive planning; the first pass finishes here. In the second pass,
-- noncolocated queries and the recurring right join are recursively planned. We detect that and
-- throw an error.
SET client_min_messages TO DEBUG1;
WITH cte_0 AS (
SELECT id FROM tbl_dist1 WHERE id IN (
SELECT id FROM tbl_ref1
LEFT JOIN tbl_dist1 USING (id)
)
)
SELECT count(id) FROM tbl_dist1
RIGHT JOIN (
SELECT table_5.id FROM (
SELECT id FROM cte_0 LIMIT 0
) AS table_5
RIGHT JOIN tbl_dist1 USING (id)
) AS table_4 USING (id);
RESET client_min_messages;
DROP TABLE IF EXISTS dist0;
CREATE TABLE dist0(id int);
SELECT create_distributed_table('dist0','id');
DROP TABLE IF EXISTS dist1;
CREATE TABLE dist1(id int);
SELECT create_distributed_table('dist1','id');
INSERT INTO dist0 SELECT i FROM generate_series(1005,1025) i;
INSERT INTO dist1 SELECT i FROM generate_series(1015,1035) i;
-- QUERY3
-- The recursive planner takes multiple passes over the query with the inlined CTE and fails.
-- The CTE is then planned without inlining, and the query succeeds.
-- Why does the inlined query fail?
-- Noncolocated queries are recursively planned; the first pass finishes here. The second pass
-- also recursively plans noncolocated queries and the recurring full join. We detect that and
-- throw an error.
SET client_min_messages TO DEBUG1;
WITH cte_0 AS (
SELECT id FROM dist0
RIGHT JOIN dist0 AS table_1 USING (id)
ORDER BY id
)
SELECT avg(avgsub.id) FROM (
SELECT table_2.id FROM (
SELECT table_3.id FROM (
SELECT table_5.id FROM cte_0 AS table_5, dist1
) AS table_3 INNER JOIN dist1 USING (id)
) AS table_2 FULL JOIN dist0 USING (id)
) AS avgsub;
RESET client_min_messages;
DROP TABLE IF EXISTS dist0;
CREATE TABLE dist0(id int);
SELECT create_distributed_table('dist0','id');
DROP TABLE IF EXISTS dist1;
CREATE TABLE dist1(id int);
SELECT create_distributed_table('dist1','id');
INSERT INTO dist0 SELECT i FROM generate_series(0,10) i;
INSERT INTO dist0 SELECT * FROM dist0 ORDER BY id LIMIT 1;
INSERT INTO dist1 SELECT i FROM generate_series(0,10) i;
INSERT INTO dist1 SELECT * FROM dist1 ORDER BY id LIMIT 1;
-- QUERY4
-- The recursive planner takes multiple passes over the query and fails.
-- Why does the inlined query fail?
-- The LIMIT clause is recursively planned in the first pass. In the second pass, noncolocated
-- queries are recursively planned. We detect that and throw an error.
SET client_min_messages TO DEBUG1;
SELECT avg(avgsub.id) FROM (
SELECT table_0.id FROM (
SELECT table_1.id FROM (
SELECT table_2.id FROM (
SELECT table_3.id FROM (
SELECT table_4.id FROM dist0 AS table_4
LEFT JOIN dist1 AS table_5 USING (id)
) AS table_3 INNER JOIN dist0 AS table_6 USING (id)
) AS table_2 WHERE table_2.id < 10 ORDER BY id LIMIT 47
) AS table_1 RIGHT JOIN dist0 AS table_7 USING (id)
) AS table_0 RIGHT JOIN dist1 AS table_8 USING (id)
) AS avgsub;
-- QUERY5
-- The recursive planner takes multiple passes over the query with the inlined CTE and fails.
-- The CTE is then planned without inlining, and the query succeeds.
-- Why does the inlined query fail?
-- The LIMIT clause is recursively planned; the first pass finishes here. In the second pass,
-- noncolocated tables and the recurring full join are recursively planned. We detect that and
-- throw an error.
WITH cte_0 AS (
SELECT table_0.id FROM dist1 AS table_0 LEFT JOIN dist1 AS table_1 USING (id) ORDER BY id LIMIT 41
)
SELECT avg(avgsub.id) FROM (
SELECT table_4.id FROM (
SELECT table_5.id FROM (
SELECT table_6.id FROM cte_0 AS table_6
) AS table_5
INNER JOIN dist0 USING (id) INNER JOIN dist1 AS table_9 USING (id)
) AS table_4 FULL JOIN dist0 USING (id)
) AS avgsub;
-- QUERY6
-- The recursive planner takes multiple passes over the query with the inlined CTE and fails.
-- The CTE is then planned without inlining, and the query succeeds.
-- Why does the inlined query fail?
-- Same query and flow as above, but with the explicit NOT MATERIALIZED option, which makes
-- the CTE directly inlinable. Even though the planner fails with the inlined query, it
-- succeeds without inlining.
WITH cte_0 AS NOT MATERIALIZED (
SELECT table_0.id FROM dist1 AS table_0 LEFT JOIN dist1 AS table_1 USING (id) ORDER BY id LIMIT 41
)
SELECT avg(avgsub.id) FROM (
SELECT table_4.id FROM (
SELECT table_5.id FROM (
SELECT table_6.id FROM cte_0 AS table_6
) AS table_5
INNER JOIN dist0 USING (id) INNER JOIN dist1 AS table_9 USING (id)
) AS table_4 FULL JOIN dist0 USING (id)
) AS avgsub;
-- QUERY7
-- The recursive planner takes multiple passes over the query and fails. Note that the CTE is
-- not used in the query.
-- Why does the inlined query fail?
-- The LIMIT clause is recursively planned; the first pass finishes here. In the second pass,
-- noncolocated queries are recursively planned. We detect the multiple passes and throw an error.
WITH cte_0 AS (
SELECT table_0.id FROM dist1 AS table_0 FULL JOIN dist1 AS table_1 USING (id)
)
SELECT avg(table_5.id) FROM (
SELECT table_6.id FROM (
SELECT table_7.id FROM dist0 AS table_7 ORDER BY id LIMIT 87
) AS table_6 INNER JOIN dist0 AS table_8 USING (id) WHERE table_8.id < 0 ORDER BY id
) AS table_5 INNER JOIN dist0 AS table_9 USING (id);
RESET client_min_messages;
DROP SCHEMA multi_recursive CASCADE;

View File

@ -643,21 +643,24 @@ SELECT
FROM
reference_table_test, colocated_table_test
WHERE
colocated_table_test.value_1 = reference_table_test.value_1; colocated_table_test.value_1 = reference_table_test.value_1
ORDER BY 1;
SELECT
colocated_table_test.value_2
FROM
reference_table_test, colocated_table_test
WHERE
colocated_table_test.value_2 = reference_table_test.value_2; colocated_table_test.value_2 = reference_table_test.value_2
ORDER BY 1;
SELECT
colocated_table_test.value_2
FROM
colocated_table_test, reference_table_test
WHERE
reference_table_test.value_1 = colocated_table_test.value_1; reference_table_test.value_1 = colocated_table_test.value_1
ORDER BY 1;
SET citus.enable_repartition_joins = on;
SELECT
@ -730,6 +733,7 @@ FROM
colocated_table_test_2, reference_table_test
WHERE
colocated_table_test_2.value_4 = reference_table_test.value_4
ORDER BY 1
RETURNING value_1, value_2;
-- a query similar to the one above, this time on the partition key but without equality

View File

@ -608,14 +608,6 @@ USING wq_source s ON t.tid = s.sid
WHEN MATCHED AND (merge_when_and_write()) THEN
UPDATE SET balance = t.balance + s.balance;
ROLLBACK;
-- Test preventing ON condition from writing to the database
BEGIN;
MERGE INTO wq_target t
USING wq_source s ON t.tid = s.sid AND (merge_when_and_write())
WHEN MATCHED THEN
UPDATE SET balance = t.balance + s.balance;
ROLLBACK;
drop function merge_when_and_write();
DROP TABLE wq_target, wq_source;

View File

@ -360,3 +360,37 @@ FROM
ORDER BY 1
LIMIT 5
) as foo WHERE user_id IN (SELECT count(*) FROM users_table GROUP BY user_id);
CREATE TABLE dist(id int, value int);
SELECT create_distributed_table('dist','id');
INSERT INTO dist SELECT i, i FROM generate_series(0,100) i;
CREATE TABLE ref(id int);
SELECT create_reference_table('ref');
INSERT INTO ref SELECT i FROM generate_series(50,150) i;
CREATE TABLE local(id int);
INSERT INTO local SELECT i FROM generate_series(50,150) i;
-- The planner recursively plans the local table in the local-distributed join, and then the whole query is routed
SELECT COUNT(*) FROM dist JOIN local USING(id)
WHERE
dist.id IN (SELECT id FROM dist WHERE id = 55) AND
dist.id = 55 AND
dist.value IN (SELECT value FROM dist WHERE id = 55);
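-- Note added for clarity: the constant filter dist.id = 55 prunes the query to a
-- single shard, which is what allows the router path once the local table has been
-- recursively planned.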
-- The subquery in the WHERE clause should be recursively planned after the planner recursively plans the recurring full join
SELECT COUNT(*) FROM ref FULL JOIN dist USING (id)
WHERE
dist.id IN (SELECT id FROM dist GROUP BY id);
-- The subqueries in the WHERE clause should be recursively planned after the planner recursively plans the full outer join
SELECT COUNT(*) FROM dist FULL JOIN ref USING(id)
WHERE
dist.id IN (SELECT id FROM dist WHERE id > 5) AND
dist.value IN (SELECT value FROM dist WHERE id > 15);
-- sublinks in the targetlist are not supported
SELECT (SELECT id FROM dist WHERE dist.id > d1.id GROUP BY id) FROM ref FULL JOIN dist d1 USING (id);
DROP TABLE dist;
DROP TABLE ref;
DROP TABLE local;

View File

@ -276,3 +276,7 @@ $$ LANGUAGE plpgsql;
CREATE TABLE text_data (id SERIAL, value TEXT) USING COLUMNAR;
INSERT INTO text_data (value) SELECT generate_random_string(1024 * 10) FROM generate_series(0,10);
select count(DISTINCT value) from text_data;
-- test using a columnar partition
CREATE TABLE foo (d DATE NOT NULL) PARTITION BY RANGE (d);
CREATE TABLE foo3 PARTITION OF foo FOR VALUES FROM ('2009-02-01') TO ('2009-03-01') USING COLUMNAR;
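-- An illustrative catalog check (added, not part of the original test): the
-- partition should report the columnar access method.
SELECT am.amname
FROM pg_class c
JOIN pg_am am ON c.relam = am.oid
WHERE c.relname = 'foo3';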