mirror of https://github.com/citusdata/citus.git
Compare commits
89 Commits
31911d8297, 3399d660f3, 002046b87b, 79cabe7eca, e28591df08, a39ce7942f, ae2eb65be0, c600eabd82,
662b7248db, c843cb2060, 4e47293f9f, accd01fbf6, cf533ebae9, 8bba66f207, f80fa1c83b, 4244bc8516,
b2356f1c85, daa69bec8f, bc41e7b94f, b10aa02908, b63572d72f, 7a7a0ba9c7, 5a71f0d1ca, fa7ca79c6f,
94653c1f4e, be2fcda071, 61b491f0f4, 6251eab9b7, e0570baad6, 503a2aba73, 86010de733, 458299035b,
188c182be4, dba9379ea5, 785a87c659, 95477e6d02, 5fc4cea1ce, bf959de39e, 90f2ab6648, 3ca66e1fcc,
5d71fca3b4, 76f18624e5, abd50a0bb8, aa0ac0af60, 432b69eb9d, f1dd976a14, 351cb2044d, 287abea661,
f0014cf0df, d9652bf5f9, 77d5807fd6, 2a6414c727, c5dde4b115, 5a3648b2cb, d4dfdd765b, cec1848b13,
bb840e58a7, 5eb1d93be1, de045402f3, 81776fe190, 80945212ae, 83b25e1fb1, b5e70f56ab, d2ea4043d4,
10d62d50ea, b4cb1a94e9, becc02b398, 360fbe3b99, b58af1c8d5, 4012e5938a, 8bb8b2ce2d, b7bfe42f1a,
0c658b73fc, 2834fa26c9, 8ece8acac7, 0fd95d71e4, d5f0ec5cd1, 544b6c4716, 2e1de77744, bb6eeb17cc,
0a5cae19ed, 62e5fcfe09, ce7ddc0d3d, aaa31376e0, 439870f3a9, 785287c58f, 86b5bc6a20, f1f0b09f73,
683ead9607
@@ -2,6 +2,8 @@
"image": "ghcr.io/citusdata/citus-devcontainer:main",
"runArgs": [
"--cap-add=SYS_PTRACE",
"--cap-add=SYS_NICE", // allow NUMA page inquiry
"--security-opt=seccomp=unconfined", // unblocks move_pages() in the container
"--ulimit=core=-1",
],
"forwardPorts": [
@@ -13,15 +13,3 @@ runs:
token: ${{ inputs.codecov_token }}
verbose: true
gcov: true
- name: Create codeclimate coverage
run: |-
lcov --directory . --capture --output-file lcov.info
lcov --remove lcov.info -o lcov.info '/usr/*'
sed "s=^SF:$PWD/=SF:=g" -i lcov.info # relative pats are required by codeclimate
mkdir -p /tmp/codeclimate
cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
shell: bash
- uses: actions/upload-artifact@v4.6.0
with:
path: "/tmp/codeclimate/*.json"
name: codeclimate-${{ inputs.flags }}
@@ -32,7 +32,7 @@ jobs:
style_checker_image_name: "ghcr.io/citusdata/stylechecker"
style_checker_tools_version: "0.8.18"
sql_snapshot_pg_version: "17.6"
image_suffix: "-v4df94a0"
image_suffix: "-va20872f"
pg15_version: '{ "major": "15", "full": "15.14" }'
pg16_version: '{ "major": "16", "full": "16.10" }'
pg17_version: '{ "major": "17", "full": "17.6" }'
@@ -225,10 +225,16 @@ jobs:
runs-on: ubuntu-latest
container:
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
options: --user root --dns=8.8.8.8
options: >-
--user root
--dns=8.8.8.8
--cap-add=SYS_NICE
--security-opt seccomp=unconfined
# Due to Github creates a default network for each job, we need to use
# --dns= to have similar DNS settings as our other CI systems or local
# machines. Otherwise, we may see different results.
# and grant caps so PG18's NUMA introspection (pg_shmem_allocations_numa -> move_pages)
# doesn't fail with EPERM in CI.
needs:
- params
- build
@@ -358,14 +364,20 @@ jobs:
flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
codecov_token: ${{ secrets.CODECOV_TOKEN }}
test-citus-upgrade:
name: PG${{ fromJson(needs.params.outputs.pg15_version).major }} - check-citus-upgrade
name: PG${{ fromJson(matrix.pg_version).major }} - check-citus-upgrade
runs-on: ubuntu-latest
container:
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg15_version).full }}${{ needs.params.outputs.image_suffix }}"
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
options: --user root
needs:
- params
- build
strategy:
fail-fast: false
matrix:
pg_version:
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
steps:
- uses: actions/checkout@v4
- uses: "./.github/actions/setup_extension"
@@ -374,7 +386,7 @@ jobs:
- name: Install and test citus upgrade
run: |-
# run make check-citus-upgrade for all citus versions
# the image has ${CITUS_VERSIONS} set with all verions it contains the binaries of
# the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
for citus_version in ${CITUS_VERSIONS}; do \
gosu circleci \
make -C src/test/regress \
@@ -385,7 +397,7 @@ jobs:
citus-post-tar=${GITHUB_WORKSPACE}/install-$PG_MAJOR.tar; \
done;
# run make check-citus-upgrade-mixed for all citus versions
# the image has ${CITUS_VERSIONS} set with all verions it contains the binaries of
# the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
for citus_version in ${CITUS_VERSIONS}; do \
gosu circleci \
make -C src/test/regress \
@@ -404,30 +416,6 @@ jobs:
with:
flags: ${{ env.PG_MAJOR }}_citus_upgrade
codecov_token: ${{ secrets.CODECOV_TOKEN }}
upload-coverage:
# secret below is not available for forks so disabling upload action for them
if: ${{ github.event.pull_request.head.repo.full_name == github.repository || github.event_name != 'pull_request' }}
env:
CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
runs-on: ubuntu-latest
container:
image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }}
needs:
- params
- test-citus
- test-arbitrary-configs
- test-citus-upgrade
- test-pg-upgrade
steps:
- uses: actions/download-artifact@v4.1.8
with:
pattern: codeclimate*
path: codeclimate
merge-multiple: true
- name: Upload coverage results to Code Climate
run: |-
cc-test-reporter sum-coverage codeclimate/*.json -o total.json
cc-test-reporter upload-coverage -i total.json
ch_benchmark:
name: CH Benchmark
if: startsWith(github.ref, 'refs/heads/ch_benchmark/')
@@ -60,8 +60,7 @@ jobs:
libzstd-dev \
libzstd1 \
lintian \
postgresql-server-dev-15 \
postgresql-server-dev-all \
postgresql-server-dev-17 \
python3-pip \
python3-setuptools \
wget \
CHANGELOG.md
@@ -1,3 +1,48 @@
### citus v13.1.1 (Oct 1st, 2025) ###

* Adds support for latest PG minors: 14.19, 15.14, 16.10 (#8142)

* Fixes an assertion failure when an expression in the query references
  a CTE (#8106)

* Fixes a bug that causes an unexpected error when executing
  repartitioned MERGE (#8201)

* Fixes a bug that causes allowing UPDATE / MERGE queries that may
  change the distribution column value (#8214)

* Updates dynamic_library_path automatically when CDC is enabled (#8025)

### citus v13.0.5 (Oct 1st, 2025) ###

* Adds support for latest PG minors: 14.19, 15.14, 16.10 (#7986, #8142)

* Fixes a bug that causes an unexpected error when executing
  repartitioned MERGE (#8201)

* Fixes a bug that causes allowing UPDATE / MERGE queries that may
  change the distribution column value (#8214)

* Fixes a bug in redundant WHERE clause detection (#8162)

* Updates dynamic_library_path automatically when CDC is enabled (#8025)

### citus v12.1.10 (Oct 1, 2025) ###

* Adds support for latest PG minors: 14.19, 15.14, 16.10 (#7986, #8142)

* Fixes a bug that causes allowing UPDATE / MERGE queries that may
  change the distribution column value (#8214)

* Fixes an assertion failure that happens when querying a view that is
  defined on distributed tables (#8136)

### citus v12.1.9 (Sep 3, 2025) ###

* Adds a GUC for queries with outer joins and pseudoconstant quals (#8163)

* Updates dynamic_library_path automatically when CDC is enabled (#7715)

### citus v13.2.0 (August 18, 2025) ###

* Adds `citus_add_clone_node()`, `citus_add_clone_node_with_nodeid()`,
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for Citus 13.2.0.
# Generated by GNU Autoconf 2.69 for Citus 14.0devel.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.

@@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='Citus'
PACKAGE_TARNAME='citus'
PACKAGE_VERSION='13.2.0'
PACKAGE_STRING='Citus 13.2.0'
PACKAGE_VERSION='14.0devel'
PACKAGE_STRING='Citus 14.0devel'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''

@@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures Citus 13.2.0 to adapt to many kinds of systems.
\`configure' configures Citus 14.0devel to adapt to many kinds of systems.

Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1324,7 +1324,7 @@ fi

if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of Citus 13.2.0:";;
short | recursive ) echo "Configuration of Citus 14.0devel:";;
esac
cat <<\_ACEOF

@@ -1429,7 +1429,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
Citus configure 13.2.0
Citus configure 14.0devel
generated by GNU Autoconf 2.69

Copyright (C) 2012 Free Software Foundation, Inc.

@@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.

It was created by Citus $as_me 13.2.0, which was
It was created by Citus $as_me 14.0devel, which was
generated by GNU Autoconf 2.69. Invocation command line was

$ $0 $@

@@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by Citus $as_me 13.2.0, which was
This file was extended by Citus $as_me 14.0devel, which was
generated by GNU Autoconf 2.69. Invocation command line was

CONFIG_FILES = $CONFIG_FILES

@@ -5455,7 +5455,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
Citus config.status 13.2.0
Citus config.status 14.0devel
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
@@ -5,7 +5,7 @@
# everyone needing autoconf installed, the resulting files are checked
# into the SCM.

AC_INIT([Citus], [13.2.0])
AC_INIT([Citus], [14.0devel])
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])

# we'll need sed and awk for some of the version commands
@@ -1,6 +1,6 @@
# Columnar extension
comment = 'Citus Columnar extension'
default_version = '13.2-1'
default_version = '14.0-1'
module_pathname = '$libdir/citus_columnar'
relocatable = false
schema = pg_catalog
@@ -1556,8 +1556,7 @@ ColumnarPerStripeScanCost(RelOptInfo *rel, Oid relationId, int numberOfColumnsRe
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}

List *stripeList = StripesForRelfilelocator(RelationPhysicalIdentifier_compat(
relation));
List *stripeList = StripesForRelfilelocator(relation);
RelationClose(relation);

uint32 maxColumnCount = 0;

@@ -1614,8 +1613,7 @@ ColumnarTableStripeCount(Oid relationId)
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}

List *stripeList = StripesForRelfilelocator(RelationPhysicalIdentifier_compat(
relation));
List *stripeList = StripesForRelfilelocator(relation);
int stripeCount = list_length(stripeList);
RelationClose(relation);
@ -125,7 +125,7 @@ static Oid ColumnarChunkGroupRelationId(void);
|
|||
static Oid ColumnarChunkIndexRelationId(void);
|
||||
static Oid ColumnarChunkGroupIndexRelationId(void);
|
||||
static Oid ColumnarNamespaceId(void);
|
||||
static uint64 LookupStorageId(RelFileLocator relfilelocator);
|
||||
static uint64 LookupStorageId(Oid relationId, RelFileLocator relfilelocator);
|
||||
static uint64 GetHighestUsedRowNumber(uint64 storageId);
|
||||
static void DeleteStorageFromColumnarMetadataTable(Oid metadataTableId,
|
||||
AttrNumber storageIdAtrrNumber,
|
||||
|
|
@ -606,7 +606,7 @@ ReadColumnarOptions(Oid regclass, ColumnarOptions *options)
|
|||
* of columnar.chunk.
|
||||
*/
|
||||
void
|
||||
SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
|
||||
SaveStripeSkipList(Oid relid, RelFileLocator relfilelocator, uint64 stripe,
|
||||
StripeSkipList *chunkList,
|
||||
TupleDesc tupleDescriptor)
|
||||
{
|
||||
|
|
@ -614,11 +614,17 @@ SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
|
|||
uint32 chunkIndex = 0;
|
||||
uint32 columnCount = chunkList->columnCount;
|
||||
|
||||
uint64 storageId = LookupStorageId(relfilelocator);
|
||||
uint64 storageId = LookupStorageId(relid, relfilelocator);
|
||||
Oid columnarChunkOid = ColumnarChunkRelationId();
|
||||
Relation columnarChunk = table_open(columnarChunkOid, RowExclusiveLock);
|
||||
ModifyState *modifyState = StartModifyRelation(columnarChunk);
|
||||
bool pushed_snapshot = false;
|
||||
|
||||
if (!ActiveSnapshotSet())
|
||||
{
|
||||
PushActiveSnapshot(GetTransactionSnapshot());
|
||||
pushed_snapshot = true;
|
||||
}
|
||||
for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
|
||||
{
|
||||
for (chunkIndex = 0; chunkIndex < chunkList->chunkCount; chunkIndex++)
|
||||
|
|
@ -649,21 +655,25 @@ SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
|
|||
{
|
||||
values[Anum_columnar_chunk_minimum_value - 1] =
|
||||
PointerGetDatum(DatumToBytea(chunk->minimumValue,
|
||||
Attr(tupleDescriptor, columnIndex)));
|
||||
TupleDescAttr(tupleDescriptor,
|
||||
columnIndex)));
|
||||
values[Anum_columnar_chunk_maximum_value - 1] =
|
||||
PointerGetDatum(DatumToBytea(chunk->maximumValue,
|
||||
Attr(tupleDescriptor, columnIndex)));
|
||||
TupleDescAttr(tupleDescriptor,
|
||||
columnIndex)));
|
||||
}
|
||||
else
|
||||
{
|
||||
nulls[Anum_columnar_chunk_minimum_value - 1] = true;
|
||||
nulls[Anum_columnar_chunk_maximum_value - 1] = true;
|
||||
}
|
||||
PushActiveSnapshot(GetTransactionSnapshot());
|
||||
InsertTupleAndEnforceConstraints(modifyState, values, nulls);
|
||||
PopActiveSnapshot();
|
||||
}
|
||||
}
|
||||
if (pushed_snapshot)
|
||||
{
|
||||
PopActiveSnapshot();
|
||||
}
|
||||
|
||||
FinishModifyRelation(modifyState);
|
||||
table_close(columnarChunk, RowExclusiveLock);
|
||||
|
|
@ -674,10 +684,10 @@ SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
|
|||
* SaveChunkGroups saves the metadata for given chunk groups in columnar.chunk_group.
|
||||
*/
|
||||
void
|
||||
SaveChunkGroups(RelFileLocator relfilelocator, uint64 stripe,
|
||||
SaveChunkGroups(Oid relid, RelFileLocator relfilelocator, uint64 stripe,
|
||||
List *chunkGroupRowCounts)
|
||||
{
|
||||
uint64 storageId = LookupStorageId(relfilelocator);
|
||||
uint64 storageId = LookupStorageId(relid, relfilelocator);
|
||||
Oid columnarChunkGroupOid = ColumnarChunkGroupRelationId();
|
||||
Relation columnarChunkGroup = table_open(columnarChunkGroupOid, RowExclusiveLock);
|
||||
ModifyState *modifyState = StartModifyRelation(columnarChunkGroup);
|
||||
|
|
@ -710,7 +720,7 @@ SaveChunkGroups(RelFileLocator relfilelocator, uint64 stripe,
|
|||
* ReadStripeSkipList fetches chunk metadata for a given stripe.
|
||||
*/
|
||||
StripeSkipList *
|
||||
ReadStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
|
||||
ReadStripeSkipList(Relation rel, uint64 stripe,
|
||||
TupleDesc tupleDescriptor,
|
||||
uint32 chunkCount, Snapshot snapshot)
|
||||
{
|
||||
|
|
@ -719,7 +729,8 @@ ReadStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
|
|||
uint32 columnCount = tupleDescriptor->natts;
|
||||
ScanKeyData scanKey[2];
|
||||
|
||||
uint64 storageId = LookupStorageId(relfilelocator);
|
||||
uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel),
|
||||
RelationPhysicalIdentifier_compat(rel));
|
||||
|
||||
Oid columnarChunkOid = ColumnarChunkRelationId();
|
||||
Relation columnarChunk = table_open(columnarChunkOid, AccessShareLock);
|
||||
|
|
@ -808,9 +819,9 @@ ReadStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
|
|||
datumArray[Anum_columnar_chunk_maximum_value - 1]);
|
||||
|
||||
chunk->minimumValue =
|
||||
ByteaToDatum(minValue, Attr(tupleDescriptor, columnIndex));
|
||||
ByteaToDatum(minValue, TupleDescAttr(tupleDescriptor, columnIndex));
|
||||
chunk->maximumValue =
|
||||
ByteaToDatum(maxValue, Attr(tupleDescriptor, columnIndex));
|
||||
ByteaToDatum(maxValue, TupleDescAttr(tupleDescriptor, columnIndex));
|
||||
|
||||
chunk->hasMinMax = true;
|
||||
}
|
||||
|
|
@ -1263,11 +1274,26 @@ InsertEmptyStripeMetadataRow(uint64 storageId, uint64 stripeId, uint32 columnCou
|
|||
* of the given relfilenode.
|
||||
*/
|
||||
List *
|
||||
StripesForRelfilelocator(RelFileLocator relfilelocator)
|
||||
StripesForRelfilelocator(Relation rel)
|
||||
{
|
||||
uint64 storageId = LookupStorageId(relfilelocator);
|
||||
uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel),
|
||||
RelationPhysicalIdentifier_compat(rel));
|
||||
|
||||
return ReadDataFileStripeList(storageId, GetTransactionSnapshot());
|
||||
/*
|
||||
* PG18 requires snapshot to be active or registered before it's used
|
||||
* Without this, we hit
|
||||
* Assert(snapshot->regd_count > 0 || snapshot->active_count > 0);
|
||||
* when reading columnar stripes.
|
||||
* Relevant PG18 commit:
|
||||
* 8076c00592e40e8dbd1fce7a98b20d4bf075e4ba
|
||||
*/
|
||||
Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
|
||||
|
||||
List *readDataFileStripeList = ReadDataFileStripeList(storageId, snapshot);
|
||||
|
||||
UnregisterSnapshot(snapshot);
|
||||
|
||||
return readDataFileStripeList;
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -1280,9 +1306,10 @@ StripesForRelfilelocator(RelFileLocator relfilelocator)
|
|||
* returns 0.
|
||||
*/
|
||||
uint64
|
||||
GetHighestUsedAddress(RelFileLocator relfilelocator)
|
||||
GetHighestUsedAddress(Relation rel)
|
||||
{
|
||||
uint64 storageId = LookupStorageId(relfilelocator);
|
||||
uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel),
|
||||
RelationPhysicalIdentifier_compat(rel));
|
||||
|
||||
uint64 highestUsedAddress = 0;
|
||||
uint64 highestUsedId = 0;
|
||||
|
|
@ -1292,6 +1319,24 @@ GetHighestUsedAddress(RelFileLocator relfilelocator)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* In case if relid hasn't been defined yet, we should use RelidByRelfilenumber
|
||||
* to get correct relid value.
|
||||
*
|
||||
* Now it is basically used for temp rels, because since PG18(it was backpatched
|
||||
* through PG13) RelidByRelfilenumber skip temp relations and we should use
|
||||
* alternative ways to get relid value in case of temp objects.
|
||||
*/
|
||||
Oid
|
||||
ColumnarRelationId(Oid relid, RelFileLocator relfilelocator)
|
||||
{
|
||||
return OidIsValid(relid) ? relid : RelidByRelfilenumber(RelationTablespace_compat(
|
||||
relfilelocator),
|
||||
RelationPhysicalIdentifierNumber_compat(
|
||||
relfilelocator));
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* GetHighestUsedAddressAndId returns the highest used address and id for
|
||||
* the given relfilenode across all active and inactive transactions.
|
||||
|
|
@ -1380,9 +1425,6 @@ static StripeMetadata *
|
|||
UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, uint64 fileOffset,
|
||||
uint64 dataLength, uint64 rowCount, uint64 chunkCount)
|
||||
{
|
||||
SnapshotData dirtySnapshot;
|
||||
InitDirtySnapshot(dirtySnapshot);
|
||||
|
||||
ScanKeyData scanKey[2];
|
||||
ScanKeyInit(&scanKey[0], Anum_columnar_stripe_storageid,
|
||||
BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(storageId));
|
||||
|
|
@ -1391,23 +1433,16 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, uint64 fileOffset,
|
|||
|
||||
Oid columnarStripesOid = ColumnarStripeRelationId();
|
||||
|
||||
#if PG_VERSION_NUM >= 180000
|
||||
|
||||
/* CatalogTupleUpdate performs a normal heap UPDATE → RowExclusiveLock */
|
||||
const LOCKMODE openLockMode = RowExclusiveLock;
|
||||
#else
|
||||
|
||||
/* In‑place update never changed tuple length → AccessShareLock was enough */
|
||||
const LOCKMODE openLockMode = AccessShareLock;
|
||||
#endif
|
||||
|
||||
Relation columnarStripes = table_open(columnarStripesOid, openLockMode);
|
||||
Relation columnarStripes = table_open(columnarStripesOid, AccessShareLock);
|
||||
TupleDesc tupleDescriptor = RelationGetDescr(columnarStripes);
|
||||
|
||||
Oid indexId = ColumnarStripePKeyIndexRelationId();
|
||||
bool indexOk = OidIsValid(indexId);
|
||||
SysScanDesc scanDescriptor = systable_beginscan(columnarStripes, indexId, indexOk,
|
||||
&dirtySnapshot, 2, scanKey);
|
||||
|
||||
void *state;
|
||||
HeapTuple tuple;
|
||||
systable_inplace_update_begin(columnarStripes, indexId, indexOk, NULL,
|
||||
2, scanKey, &tuple, &state);
|
||||
|
||||
static bool loggedSlowMetadataAccessWarning = false;
|
||||
if (!indexOk && !loggedSlowMetadataAccessWarning)
|
||||
|
|
@ -1416,8 +1451,7 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, uint64 fileOffset,
|
|||
loggedSlowMetadataAccessWarning = true;
|
||||
}
|
||||
|
||||
HeapTuple oldTuple = systable_getnext(scanDescriptor);
|
||||
if (!HeapTupleIsValid(oldTuple))
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
{
|
||||
ereport(ERROR, (errmsg("attempted to modify an unexpected stripe, "
|
||||
"columnar storage with id=" UINT64_FORMAT
|
||||
|
|
@ -1425,6 +1459,11 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, uint64 fileOffset,
|
|||
storageId, stripeId)));
|
||||
}
|
||||
|
||||
/*
|
||||
* systable_inplace_update_finish already doesn't allow changing size of the original
|
||||
* tuple, so we don't allow setting any Datum's to NULL values.
|
||||
*/
|
||||
|
||||
Datum *newValues = (Datum *) palloc(tupleDescriptor->natts * sizeof(Datum));
|
||||
bool *newNulls = (bool *) palloc0(tupleDescriptor->natts * sizeof(bool));
|
||||
bool *update = (bool *) palloc0(tupleDescriptor->natts * sizeof(bool));
|
||||
|
|
@ -1439,43 +1478,21 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, uint64 fileOffset,
|
|||
newValues[Anum_columnar_stripe_row_count - 1] = UInt64GetDatum(rowCount);
|
||||
newValues[Anum_columnar_stripe_chunk_count - 1] = Int32GetDatum(chunkCount);
|
||||
|
||||
HeapTuple modifiedTuple = heap_modify_tuple(oldTuple,
|
||||
tupleDescriptor,
|
||||
newValues,
|
||||
newNulls,
|
||||
update);
|
||||
tuple = heap_modify_tuple(tuple,
|
||||
tupleDescriptor,
|
||||
newValues,
|
||||
newNulls,
|
||||
update);
|
||||
|
||||
#if PG_VERSION_NUM < PG_VERSION_18
|
||||
systable_inplace_update_finish(state, tuple);
|
||||
|
||||
/*
|
||||
* heap_inplace_update already doesn't allow changing size of the original
|
||||
* tuple, so we don't allow setting any Datum's to NULL values.
|
||||
*/
|
||||
heap_inplace_update(columnarStripes, modifiedTuple);
|
||||
|
||||
/*
|
||||
* Existing tuple now contains modifications, because we used
|
||||
* heap_inplace_update().
|
||||
*/
|
||||
HeapTuple newTuple = oldTuple;
|
||||
#else
|
||||
|
||||
/* Regular catalog UPDATE keeps indexes in sync */
|
||||
CatalogTupleUpdate(columnarStripes, &oldTuple->t_self, modifiedTuple);
|
||||
HeapTuple newTuple = modifiedTuple;
|
||||
#endif
|
||||
StripeMetadata *modifiedStripeMetadata = BuildStripeMetadata(columnarStripes,
|
||||
tuple);
|
||||
|
||||
CommandCounterIncrement();
|
||||
|
||||
/*
|
||||
* Must not pass modifiedTuple, because BuildStripeMetadata expects a real
|
||||
* heap tuple with MVCC fields.
|
||||
*/
|
||||
StripeMetadata *modifiedStripeMetadata =
|
||||
BuildStripeMetadata(columnarStripes, newTuple);
|
||||
|
||||
systable_endscan(scanDescriptor);
|
||||
table_close(columnarStripes, openLockMode);
|
||||
heap_freetuple(tuple);
|
||||
table_close(columnarStripes, AccessShareLock);
|
||||
|
||||
pfree(newValues);
|
||||
pfree(newNulls);
|
||||
|
|
@ -1595,7 +1612,7 @@ BuildStripeMetadata(Relation columnarStripes, HeapTuple heapTuple)
|
|||
* metadata tables.
|
||||
*/
|
||||
void
|
||||
DeleteMetadataRows(RelFileLocator relfilelocator)
|
||||
DeleteMetadataRows(Relation rel)
|
||||
{
|
||||
/*
|
||||
* During a restore for binary upgrade, metadata tables and indexes may or
|
||||
|
|
@ -1606,7 +1623,8 @@ DeleteMetadataRows(RelFileLocator relfilelocator)
|
|||
return;
|
||||
}
|
||||
|
||||
uint64 storageId = LookupStorageId(relfilelocator);
|
||||
uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel),
|
||||
RelationPhysicalIdentifier_compat(rel));
|
||||
|
||||
DeleteStorageFromColumnarMetadataTable(ColumnarStripeRelationId(),
|
||||
Anum_columnar_stripe_storageid,
|
||||
|
|
@ -2005,13 +2023,11 @@ ColumnarNamespaceId(void)
|
|||
* false if the relation doesn't have a meta page yet.
|
||||
*/
|
||||
static uint64
|
||||
LookupStorageId(RelFileLocator relfilelocator)
|
||||
LookupStorageId(Oid relid, RelFileLocator relfilelocator)
|
||||
{
|
||||
Oid relationId = RelidByRelfilenumber(RelationTablespace_compat(relfilelocator),
|
||||
RelationPhysicalIdentifierNumber_compat(
|
||||
relfilelocator));
|
||||
relid = ColumnarRelationId(relid, relfilelocator);
|
||||
|
||||
Relation relation = relation_open(relationId, AccessShareLock);
|
||||
Relation relation = relation_open(relid, AccessShareLock);
|
||||
uint64 storageId = ColumnarStorageGetStorageId(relation, false);
|
||||
table_close(relation, AccessShareLock);
|
||||
|
||||
|
|
|
|||
|
|
@ -986,8 +986,7 @@ ColumnarTableRowCount(Relation relation)
|
|||
{
|
||||
ListCell *stripeMetadataCell = NULL;
|
||||
uint64 totalRowCount = 0;
|
||||
List *stripeList = StripesForRelfilelocator(RelationPhysicalIdentifier_compat(
|
||||
relation));
|
||||
List *stripeList = StripesForRelfilelocator(relation);
|
||||
|
||||
foreach(stripeMetadataCell, stripeList)
|
||||
{
|
||||
|
|
@ -1015,8 +1014,7 @@ LoadFilteredStripeBuffers(Relation relation, StripeMetadata *stripeMetadata,
|
|||
|
||||
bool *projectedColumnMask = ProjectedColumnMask(columnCount, projectedColumnList);
|
||||
|
||||
StripeSkipList *stripeSkipList = ReadStripeSkipList(RelationPhysicalIdentifier_compat(
|
||||
relation),
|
||||
StripeSkipList *stripeSkipList = ReadStripeSkipList(relation,
|
||||
stripeMetadata->id,
|
||||
tupleDescriptor,
|
||||
stripeMetadata->chunkCount,
|
||||
|
|
|
|||
|
|
@ -872,7 +872,7 @@ columnar_relation_set_new_filelocator(Relation rel,
|
|||
RelationPhysicalIdentifier_compat(rel)),
|
||||
GetCurrentSubTransactionId());
|
||||
|
||||
DeleteMetadataRows(RelationPhysicalIdentifier_compat(rel));
|
||||
DeleteMetadataRows(rel);
|
||||
}
|
||||
|
||||
*freezeXid = RecentXmin;
|
||||
|
|
@ -897,7 +897,7 @@ columnar_relation_nontransactional_truncate(Relation rel)
|
|||
NonTransactionDropWriteState(RelationPhysicalIdentifierNumber_compat(relfilelocator));
|
||||
|
||||
/* Delete old relfilenode metadata */
|
||||
DeleteMetadataRows(relfilelocator);
|
||||
DeleteMetadataRows(rel);
|
||||
|
||||
/*
|
||||
* No need to set new relfilenode, since the table was created in this
|
||||
|
|
@ -960,8 +960,7 @@ columnar_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
|
|||
ColumnarOptions columnarOptions = { 0 };
|
||||
ReadColumnarOptions(OldHeap->rd_id, &columnarOptions);
|
||||
|
||||
ColumnarWriteState *writeState = ColumnarBeginWrite(RelationPhysicalIdentifier_compat(
|
||||
NewHeap),
|
||||
ColumnarWriteState *writeState = ColumnarBeginWrite(NewHeap,
|
||||
columnarOptions,
|
||||
targetDesc);
|
||||
|
||||
|
|
@ -1012,7 +1011,7 @@ NeededColumnsList(TupleDesc tupdesc, Bitmapset *attr_needed)
|
|||
|
||||
for (int i = 0; i < tupdesc->natts; i++)
|
||||
{
|
||||
if (Attr(tupdesc, i)->attisdropped)
|
||||
if (TupleDescAttr(tupdesc, i)->attisdropped)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
|
@ -1036,8 +1035,7 @@ NeededColumnsList(TupleDesc tupdesc, Bitmapset *attr_needed)
|
|||
static uint64
|
||||
ColumnarTableTupleCount(Relation relation)
|
||||
{
|
||||
List *stripeList = StripesForRelfilelocator(RelationPhysicalIdentifier_compat(
|
||||
relation));
|
||||
List *stripeList = StripesForRelfilelocator(relation);
|
||||
uint64 tupleCount = 0;
|
||||
|
||||
ListCell *lc = NULL;
|
||||
|
|
@ -1228,7 +1226,6 @@ static void
|
|||
LogRelationStats(Relation rel, int elevel)
|
||||
{
|
||||
ListCell *stripeMetadataCell = NULL;
|
||||
RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel);
|
||||
StringInfo infoBuf = makeStringInfo();
|
||||
|
||||
int compressionStats[COMPRESSION_COUNT] = { 0 };
|
||||
|
|
@ -1239,19 +1236,23 @@ LogRelationStats(Relation rel, int elevel)
|
|||
uint64 droppedChunksWithData = 0;
|
||||
uint64 totalDecompressedLength = 0;
|
||||
|
||||
List *stripeList = StripesForRelfilelocator(relfilelocator);
|
||||
List *stripeList = StripesForRelfilelocator(rel);
|
||||
int stripeCount = list_length(stripeList);
|
||||
|
||||
foreach(stripeMetadataCell, stripeList)
|
||||
{
|
||||
StripeMetadata *stripe = lfirst(stripeMetadataCell);
|
||||
StripeSkipList *skiplist = ReadStripeSkipList(relfilelocator, stripe->id,
|
||||
|
||||
Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
|
||||
StripeSkipList *skiplist = ReadStripeSkipList(rel, stripe->id,
|
||||
RelationGetDescr(rel),
|
||||
stripe->chunkCount,
|
||||
GetTransactionSnapshot());
|
||||
snapshot);
|
||||
UnregisterSnapshot(snapshot);
|
||||
|
||||
for (uint32 column = 0; column < skiplist->columnCount; column++)
|
||||
{
|
||||
bool attrDropped = Attr(tupdesc, column)->attisdropped;
|
||||
bool attrDropped = TupleDescAttr(tupdesc, column)->attisdropped;
|
||||
for (uint32 chunk = 0; chunk < skiplist->chunkCount; chunk++)
|
||||
{
|
||||
ColumnChunkSkipNode *skipnode =
|
||||
|
|
@ -1381,8 +1382,7 @@ TruncateColumnar(Relation rel, int elevel)
|
|||
* new stripes be added beyond highestPhysicalAddress while
|
||||
* we're truncating.
|
||||
*/
|
||||
uint64 newDataReservation = Max(GetHighestUsedAddress(
|
||||
RelationPhysicalIdentifier_compat(rel)) + 1,
|
||||
uint64 newDataReservation = Max(GetHighestUsedAddress(rel) + 1,
|
||||
ColumnarFirstLogicalOffset);
|
||||
|
||||
BlockNumber old_rel_pages = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM);
|
||||
|
|
@ -2150,7 +2150,7 @@ ColumnarTableDropHook(Oid relid)
|
|||
Relation rel = table_open(relid, AccessExclusiveLock);
|
||||
RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel);
|
||||
|
||||
DeleteMetadataRows(relfilelocator);
|
||||
DeleteMetadataRows(rel);
|
||||
DeleteColumnarTableOptions(rel->rd_id, true);
|
||||
|
||||
MarkRelfilenumberDropped(RelationPhysicalIdentifierNumber_compat(relfilelocator),
|
||||
|
|
@ -2634,7 +2634,7 @@ detoast_values(TupleDesc tupleDesc, Datum *orig_values, bool *isnull)
|
|||
|
||||
for (int i = 0; i < tupleDesc->natts; i++)
|
||||
{
|
||||
if (!isnull[i] && Attr(tupleDesc, i)->attlen == -1 &&
|
||||
if (!isnull[i] && TupleDescAttr(tupleDesc, i)->attlen == -1 &&
|
||||
VARATT_IS_EXTENDED(values[i]))
|
||||
{
|
||||
/* make a copy */
|
||||
|
|
|
|||
|
|
@ -48,6 +48,12 @@ struct ColumnarWriteState
|
|||
FmgrInfo **comparisonFunctionArray;
|
||||
RelFileLocator relfilelocator;
|
||||
|
||||
/*
|
||||
* We can't rely on RelidByRelfilenumber for temp tables since
|
||||
* PG18(it was backpatched through PG13).
|
||||
*/
|
||||
Oid temp_relid;
|
||||
|
||||
MemoryContext stripeWriteContext;
|
||||
MemoryContext perTupleContext;
|
||||
StripeBuffers *stripeBuffers;
|
||||
|
|
@ -93,10 +99,12 @@ static StringInfo CopyStringInfo(StringInfo sourceString);
|
|||
* data load operation.
|
||||
*/
|
||||
ColumnarWriteState *
|
||||
ColumnarBeginWrite(RelFileLocator relfilelocator,
|
||||
ColumnarBeginWrite(Relation rel,
|
||||
ColumnarOptions options,
|
||||
TupleDesc tupleDescriptor)
|
||||
{
|
||||
RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel);
|
||||
|
||||
/* get comparison function pointers for each of the columns */
|
||||
uint32 columnCount = tupleDescriptor->natts;
|
||||
FmgrInfo **comparisonFunctionArray = palloc0(columnCount * sizeof(FmgrInfo *));
|
||||
|
|
@ -134,6 +142,7 @@ ColumnarBeginWrite(RelFileLocator relfilelocator,
|
|||
|
||||
ColumnarWriteState *writeState = palloc0(sizeof(ColumnarWriteState));
|
||||
writeState->relfilelocator = relfilelocator;
|
||||
writeState->temp_relid = RelationPrecomputeOid(rel);
|
||||
writeState->options = options;
|
||||
writeState->tupleDescriptor = CreateTupleDescCopy(tupleDescriptor);
|
||||
writeState->comparisonFunctionArray = comparisonFunctionArray;
|
||||
|
|
@ -183,10 +192,9 @@ ColumnarWriteRow(ColumnarWriteState *writeState, Datum *columnValues, bool *colu
|
|||
writeState->stripeSkipList = stripeSkipList;
|
||||
writeState->compressionBuffer = makeStringInfo();
|
||||
|
||||
Oid relationId = RelidByRelfilenumber(RelationTablespace_compat(
|
||||
writeState->relfilelocator),
|
||||
RelationPhysicalIdentifierNumber_compat(
|
||||
writeState->relfilelocator));
|
||||
Oid relationId = ColumnarRelationId(writeState->temp_relid,
|
||||
writeState->relfilelocator);
|
||||
|
||||
Relation relation = relation_open(relationId, NoLock);
|
||||
writeState->emptyStripeReservation =
|
||||
ReserveEmptyStripe(relation, columnCount, chunkRowCount,
|
||||
|
|
@ -404,10 +412,9 @@ FlushStripe(ColumnarWriteState *writeState)
|
|||
|
||||
elog(DEBUG1, "Flushing Stripe of size %d", stripeBuffers->rowCount);
|
||||
|
||||
Oid relationId = RelidByRelfilenumber(RelationTablespace_compat(
|
||||
writeState->relfilelocator),
|
||||
RelationPhysicalIdentifierNumber_compat(
|
||||
writeState->relfilelocator));
|
||||
Oid relationId = ColumnarRelationId(writeState->temp_relid,
|
||||
writeState->relfilelocator);
|
||||
|
||||
Relation relation = relation_open(relationId, NoLock);
|
||||
|
||||
/*
|
||||
|
|
@ -499,10 +506,12 @@ FlushStripe(ColumnarWriteState *writeState)
|
|||
}
|
||||
}
|
||||
|
||||
SaveChunkGroups(writeState->relfilelocator,
|
||||
SaveChunkGroups(writeState->temp_relid,
|
||||
writeState->relfilelocator,
|
||||
stripeMetadata->id,
|
||||
writeState->chunkGroupRowCounts);
|
||||
SaveStripeSkipList(writeState->relfilelocator,
|
||||
SaveStripeSkipList(writeState->temp_relid,
|
||||
writeState->relfilelocator,
|
||||
stripeMetadata->id,
|
||||
stripeSkipList, tupleDescriptor);
|
||||
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,2 @@
-- citus_columnar--13.2-1--14.0-1
-- bump version to 14.0-1

@@ -0,0 +1,2 @@
-- citus_columnar--14.0-1--13.2-1
-- downgrade version to 13.2-1
@@ -191,8 +191,7 @@ columnar_init_write_state(Relation relation, TupleDesc tupdesc,
ReadColumnarOptions(tupSlotRelationId, &columnarOptions);

SubXidWriteState *stackEntry = palloc0(sizeof(SubXidWriteState));
stackEntry->writeState = ColumnarBeginWrite(RelationPhysicalIdentifier_compat(
relation),
stackEntry->writeState = ColumnarBeginWrite(relation,
columnarOptions,
tupdesc);
stackEntry->subXid = currentSubXid;
@@ -355,6 +355,15 @@ DEBUG: Total number of commands sent over the session 8: 1 to node localhost:97
(0 rows)
```

### Delaying the Fast Path Plan

As of Citus 13.2, if it can be determined at plan time that a fast path query targets a local shard, then a shortcut can be taken so that deparsing and re-planning of the shard query is avoided. Citus must be in MX mode and the shard must be local to the Citus node processing the query. If so, the OID of the distributed table is replaced by the OID of the shard in the parse tree. The parse tree is then given to the Postgres planner, which returns a plan that is stored in the distributed plan's task. That plan can be reused by the local executor (described in the next section), avoiding the need to deparse and plan the shard query on each execution.

We call this delayed fast path planning: if a query is eligible for fast path planning, the call to `FastPathPlanner()` is delayed when the following properties hold:
- The query is a SELECT or UPDATE on a distributed table (schema or column sharded) or Citus managed local table
- The query has no volatile functions

If so, then `FastPathRouterQuery()` sets a flag indicating that building the fast path plan should be delayed until after the worker job has been created. At that point the router planner uses `CheckAndBuildDelayedFastPathPlan()` to check whether the task's shard placement is local (and not a dummy placement) and whether the metadata of the shard table and the distributed table are consistent (no DDL in progress on the distributed table). If so, the parse tree, with the OID of the distributed table replaced by the OID of the shard table, is fed to `standard_planner()` and the resulting plan is saved in the task. Otherwise (the worker job has been marked for deferred pruning, the shard is not local, or the shard is local but it is not safe to swap OIDs), `CheckAndBuildDelayedFastPathPlan()` falls back to `FastPathPlanner()` to ensure a complete plan context. Reference tables are not currently supported, but this may be relaxed for SELECT statements in the future. Delayed fast path planning can be disabled by turning off `citus.enable_local_fast_path_query_optimization` (it is on by default).
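
To make the conditions above concrete, here is a minimal sketch of a query shape that is eligible for delayed fast path planning. The table and column names are only illustrative and assume a hash-distributed table `orders_table` with distribution column `order_id`:

```sql
-- Illustrative only: assumes orders_table is hash-distributed on order_id.
-- A fast path query: a simple SELECT filtered on the distribution column
-- with a constant and containing no volatile functions. If the matching
-- shard placement is local to the node planning the query, Citus can swap
-- the distributed table's OID for the shard's OID and hand the parse tree
-- to standard_planner(), skipping deparse and re-planning of the shard query.
SELECT order_id, status
FROM orders_table
WHERE order_id = 42;
```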

## Router Planner in Citus
@@ -788,14 +797,13 @@ WHERE l.user_id = o.user_id AND o.primary_key = 55;


### Ref table LEFT JOIN distributed table JOINs via recursive planning
### Outer joins between reference and distributed tables

Very much like local-distributed table joins, Citus can't push down queries formatted as:
In general, when the outer side of an outer join is a recurring tuple (e.g., reference table, intermediate results, or set returning functions), it is not safe to push down the join.
```sql
"... ref_table LEFT JOIN distributed_table ..."
"... distributed_table RIGHT JOIN ref_table ..."
```
This is the case when the outer side is a recurring tuple (e.g., reference table, intermediate results, or set returning functions).

In these situations, Citus recursively plans the "distributed" part of the join. Even though it may seem excessive to recursively plan a distributed table, remember that Citus pushes down the filters and projections. Functions involved here include `RequiredAttrNumbersForRelation()` and `ReplaceRTERelationWithRteSubquery()`.

The core function handling this logic is `RecursivelyPlanRecurringTupleOuterJoinWalker()`. There are likely numerous optimizations possible (e.g., first pushing down an inner JOIN then an outer join), but these have not been implemented due to their complexity.
@@ -819,6 +827,45 @@ DEBUG: Wrapping relation "orders_table" "o" to a subquery
DEBUG: generating subplan 45_1 for subquery SELECT order_id, status FROM public.orders_table o WHERE true
```

As of Citus 13.2, under certain conditions, Citus can push down these types of LEFT and RIGHT outer joins by injecting constraints, derived from the shard intervals of distributed tables, into the shard queries for the reference table. The eligibility rules for pushdown are defined in `CanPushdownRecurringOuterJoin()`, while the logic for computing and injecting the constraints is implemented in `UpdateWhereClauseToPushdownRecurringOuterJoin()`.

#### Example Query

In the example below, Citus pushes down the query by injecting interval constraints on the reference table. The injected constraints are visible in the EXPLAIN output.

```sql
SELECT pc.category_name, count(pt.product_id)
FROM product_categories pc
LEFT JOIN products_table pt ON pc.category_id = pt.product_id
GROUP BY pc.category_name;
```

#### Debug Messages
```
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: a push down safe left join with recurring left side
```

#### Explain Output
```
HashAggregate
  Group Key: remote_scan.category_name
  ->  Custom Scan (Citus Adaptive)
        Task Count: 32
        Tasks Shown: One of 32
        ->  Task
              Node: host=localhost port=9701 dbname=ebru
              ->  HashAggregate
                    Group Key: pc.category_name
                    ->  Hash Right Join
                          Hash Cond: (pt.product_id = pc.category_id)
                          ->  Seq Scan on products_table_102072 pt
                          ->  Hash
                                ->  Seq Scan on product_categories_102106 pc
                                      Filter: ((category_id IS NULL) OR ((btint4cmp('-2147483648'::integer, hashint8((category_id)::bigint)) < 0) AND (btint4cmp(hashint8((category_id)::bigint), '-2013265921'::integer) <= 0)))
```


### Recursive Planning When FROM Clause has Reference Table (or Recurring Tuples)

This section discusses a specific scenario in Citus's recursive query planning: handling queries where the main query's `FROM` clause is recurring, but there are subqueries in the `SELECT` or `WHERE` clauses involving distributed tables.
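
As a minimal illustration of this shape, reusing the reference table `product_categories` and the distributed table `products_table` from the example above (the filter value is arbitrary): the main `FROM` clause is recurring, while the `WHERE` clause contains a subquery on a distributed table, so that subquery is planned recursively and replaced with an intermediate result:

```sql
SELECT pc.category_name
FROM product_categories pc          -- reference table (recurring tuples)
WHERE pc.category_id IN (
    SELECT product_id               -- subquery on a distributed table,
    FROM products_table             -- recursively planned by Citus
    WHERE product_id > 100
);
```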
|
|
@@ -1,6 +1,6 @@
# Citus extension
comment = 'Citus distributed database'
default_version = '13.2-1'
default_version = '14.0-1'
module_pathname = '$libdir/citus'
relocatable = false
schema = pg_catalog
@@ -1927,14 +1927,10 @@ GetNonGeneratedStoredColumnNameList(Oid relationId)
for (int columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++)
{
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
if (currentColumn->attisdropped)
{
/* skip dropped columns */
continue;
}

if (currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED)
if (IsDroppedOrGenerated(currentColumn))
{
/* skip dropped or generated columns */
continue;
}
@@ -19,6 +19,7 @@
#include "nodes/parsenodes.h"
#include "tcop/utility.h"

#include "distributed/citus_depended_object.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/deparser.h"

@@ -63,6 +64,13 @@ PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryS
return NIL;
}

if (ops->qualify && DistOpsValidityState(stmt, ops) ==
ShouldQualifyAfterLocalCreation)
{
/* qualify the statement after local creation */
ops->qualify(stmt);
}

List *addresses = GetObjectAddressListFromParseTree(stmt, false, true);

/* the code-path only supports a single object */
@ -175,8 +175,9 @@ static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationD
|
|||
static int numeric_typmod_scale(int32 typmod);
|
||||
static bool is_valid_numeric_typmod(int32 typmod);
|
||||
|
||||
static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
|
||||
Var *distributionColumn);
|
||||
static void DistributionColumnIsGeneratedCheck(TupleDesc relationDesc,
|
||||
Var *distributionColumn,
|
||||
const char *relationName);
|
||||
static bool CanUseExclusiveConnections(Oid relationId, bool localTableEmpty);
|
||||
static uint64 DoCopyFromLocalTableIntoShards(Relation distributedRelation,
|
||||
DestReceiver *copyDest,
|
||||
|
|
@ -2103,13 +2104,10 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
|
|||
|
||||
/* verify target relation is not distributed by a generated stored column
|
||||
*/
|
||||
if (distributionMethod != DISTRIBUTE_BY_NONE &&
|
||||
DistributionColumnUsesGeneratedStoredColumn(relationDesc, distributionColumn))
|
||||
if (distributionMethod != DISTRIBUTE_BY_NONE)
|
||||
{
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot distribute relation: %s", relationName),
|
||||
errdetail("Distribution column must not use GENERATED ALWAYS "
|
||||
"AS (...) STORED.")));
|
||||
DistributionColumnIsGeneratedCheck(relationDesc, distributionColumn,
|
||||
relationName);
|
||||
}
|
||||
|
||||
/* verify target relation is not distributed by a column of type numeric with negative scale */
|
||||
|
|
@ -2829,9 +2827,7 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
|
|||
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
|
||||
char *columnName = NameStr(currentColumn->attname);
|
||||
|
||||
if (currentColumn->attisdropped ||
|
||||
currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
|
||||
)
|
||||
if (IsDroppedOrGenerated(currentColumn))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
|
@ -2893,22 +2889,43 @@ DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
|
|||
|
||||
|
||||
/*
|
||||
* DistributionColumnUsesGeneratedStoredColumn returns whether a given relation uses
|
||||
* GENERATED ALWAYS AS (...) STORED on distribution column
|
||||
* DistributionColumnIsGeneratedCheck throws an error if a given relation uses
|
||||
* GENERATED ALWAYS AS (...) STORED | VIRTUAL on distribution column
|
||||
*/
|
||||
static bool
|
||||
DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
|
||||
Var *distributionColumn)
|
||||
static void
|
||||
DistributionColumnIsGeneratedCheck(TupleDesc relationDesc,
|
||||
Var *distributionColumn,
|
||||
const char *relationName)
|
||||
{
|
||||
Form_pg_attribute attributeForm = TupleDescAttr(relationDesc,
|
||||
distributionColumn->varattno - 1);
|
||||
|
||||
if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
|
||||
switch (attributeForm->attgenerated)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
case ATTRIBUTE_GENERATED_STORED:
|
||||
{
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot distribute relation: %s", relationName),
|
||||
errdetail("Distribution column must not use GENERATED ALWAYS "
|
||||
"AS (...) STORED.")));
|
||||
break;
|
||||
}
|
||||
|
||||
return false;
|
||||
#if PG_VERSION_NUM >= PG_VERSION_18
|
||||
case ATTRIBUTE_GENERATED_VIRTUAL:
|
||||
{
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot distribute relation: %s", relationName),
|
||||
errdetail("Distribution column must not use GENERATED ALWAYS "
|
||||
"AS (...) VIRTUAL.")));
|
||||
break;
|
||||
}
|
||||
|
||||
#endif
|
||||
default:
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@@ -854,8 +854,11 @@ PostprocessIndexStmt(Node *node, const char *queryString)
table_close(relation, NoLock);
index_close(indexRelation, NoLock);

PushActiveSnapshot(GetTransactionSnapshot());

/* mark index as invalid, in-place (cannot be rolled back) */
index_set_state_flags(indexRelationId, INDEX_DROP_CLEAR_VALID);
PopActiveSnapshot();

/* re-open a transaction command from here on out */
CommitTransactionCommand();

@@ -1370,8 +1373,11 @@ MarkIndexValid(IndexStmt *indexStmt)
schemaId);
Relation indexRelation = index_open(indexRelationId, RowExclusiveLock);

PushActiveSnapshot(GetTransactionSnapshot());

/* mark index as valid, in-place (cannot be rolled back) */
index_set_state_flags(indexRelationId, INDEX_CREATE_SET_VALID);
PopActiveSnapshot();

table_close(relation, NoLock);
index_close(indexRelation, NoLock);
@ -350,7 +350,6 @@ static void LogLocalCopyToRelationExecution(uint64 shardId);
|
|||
static void LogLocalCopyToFileExecution(uint64 shardId);
|
||||
static void ErrorIfMergeInCopy(CopyStmt *copyStatement);
|
||||
|
||||
|
||||
/* exports for SQL callable functions */
|
||||
PG_FUNCTION_INFO_V1(citus_text_send_as_jsonb);
|
||||
|
||||
|
|
@ -484,9 +483,7 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag)
|
|||
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
|
||||
char *columnName = NameStr(currentColumn->attname);
|
||||
|
||||
if (currentColumn->attisdropped ||
|
||||
currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
|
||||
)
|
||||
if (IsDroppedOrGenerated(currentColumn))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
|
@ -804,9 +801,7 @@ CanUseBinaryCopyFormat(TupleDesc tupleDescription)
|
|||
{
|
||||
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescription, columnIndex);
|
||||
|
||||
if (currentColumn->attisdropped ||
|
||||
currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
|
||||
)
|
||||
if (IsDroppedOrGenerated(currentColumn))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
|
@ -1316,9 +1311,7 @@ TypeArrayFromTupleDescriptor(TupleDesc tupleDescriptor)
|
|||
for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
|
||||
{
|
||||
Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, columnIndex);
|
||||
if (attr->attisdropped ||
|
||||
attr->attgenerated == ATTRIBUTE_GENERATED_STORED
|
||||
)
|
||||
if (IsDroppedOrGenerated(attr))
|
||||
{
|
||||
typeArray[columnIndex] = InvalidOid;
|
||||
}
|
||||
|
|
@ -1486,9 +1479,7 @@ AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor,
|
|||
value = CoerceColumnValue(value, &columnCoercionPaths[columnIndex]);
|
||||
}
|
||||
|
||||
if (currentColumn->attisdropped ||
|
||||
currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
|
||||
)
|
||||
if (IsDroppedOrGenerated(currentColumn))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
|
@ -1607,9 +1598,7 @@ AvailableColumnCount(TupleDesc tupleDescriptor)
|
|||
{
|
||||
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
|
||||
|
||||
if (!currentColumn->attisdropped &&
|
||||
currentColumn->attgenerated != ATTRIBUTE_GENERATED_STORED
|
||||
)
|
||||
if (!IsDroppedOrGenerated(currentColumn))
|
||||
{
|
||||
columnCount++;
|
||||
}
|
||||
|
|
@@ -3999,3 +3988,20 @@ UnclaimCopyConnections(List *connectionStateList)
UnclaimConnection(connectionState->connection);
}
}


/*
* IsDroppedOrGenerated - helper function for determining if an attribute is
* dropped or generated. Used by COPY and Citus DDL to skip such columns.
*/
inline bool
IsDroppedOrGenerated(Form_pg_attribute attr)
{
/*
* If the "is dropped" flag is true or the generated column flag
* is not the default nul character (in which case its value is 's'
* for ATTRIBUTE_GENERATED_STORED or possibly 'v' with PG18+ for
* ATTRIBUTE_GENERATED_VIRTUAL) then return true.
*/
return attr->attisdropped || (attr->attgenerated != '\0');
}
@@ -196,6 +196,27 @@ BuildCreatePublicationStmt(Oid publicationId)
-1);
createPubStmt->options = lappend(createPubStmt->options, pubViaRootOption);

/* WITH (publish_generated_columns = ...) option (PG18+) */
#if PG_VERSION_NUM >= PG_VERSION_18
if (publicationForm->pubgencols == 's') /* stored */
{
DefElem *pubGenColsOption =
makeDefElem("publish_generated_columns",
(Node *) makeString("stored"),
-1);

createPubStmt->options =
lappend(createPubStmt->options, pubGenColsOption);
}
else if (publicationForm->pubgencols != 'n') /* 'n' = none (default) */
{
ereport(ERROR,
(errmsg("unexpected pubgencols value '%c' for publication %u",
publicationForm->pubgencols, publicationId)));
}
#endif


/* WITH (publish = 'insert, update, delete, truncate') option */
List *publishList = NIL;
@@ -177,8 +177,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,
{
Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);

if (attributeForm->attisdropped ||
attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
if (IsDroppedOrGenerated(attributeForm))
{
/* skip dropped columns and columns with GENERATED AS ALWAYS expressions */
continue;
@@ -69,7 +69,15 @@ PreprocessCreateStatisticsStmt(Node *node, const char *queryString,
{
CreateStatsStmt *stmt = castNode(CreateStatsStmt, node);

RangeVar *relation = (RangeVar *) linitial(stmt->relations);
Node *relationNode = (Node *) linitial(stmt->relations);

if (!IsA(relationNode, RangeVar))
{
return NIL;
}

RangeVar *relation = (RangeVar *) relationNode;

Oid relationId = RangeVarGetRelid(relation, ShareUpdateExclusiveLock, false);

if (!IsCitusTable(relationId) || !ShouldPropagate())
@ -48,21 +48,27 @@ typedef struct CitusVacuumParams
#endif
} CitusVacuumParams;

/*
* Information we track per VACUUM/ANALYZE target relation.
*/
typedef struct CitusVacuumRelation
{
VacuumRelation *vacuumRelation;
Oid relationId;
} CitusVacuumRelation;

/* Local functions forward declarations for processing distributed table commands */
static bool IsDistributedVacuumStmt(List *vacuumRelationIdList);
static bool IsDistributedVacuumStmt(List *vacuumRelationList);
static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams,
List *vacuumColumnList);
static char * DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams);
static char * DeparseVacuumColumnNames(List *columnNameList);
static List * VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex);
static List * ExtractVacuumTargetRels(VacuumStmt *vacuumStmt);
static void ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
static void ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationList,
CitusVacuumParams vacuumParams);
static void ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt,
CitusVacuumParams vacuumParams);
static CitusVacuumParams VacuumStmtParams(VacuumStmt *vacstmt);
static List * VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams
vacuumParams);
static List * VacuumRelationList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams);

/*
* PostprocessVacuumStmt processes vacuum statements that may need propagation to

@ -97,7 +103,7 @@ PostprocessVacuumStmt(Node *node, const char *vacuumCommand)
* when no table is specified propagate the command as it is;
* otherwise, only propagate when there is at least 1 citus table
*/
List *relationIdList = VacuumRelationIdList(vacuumStmt, vacuumParams);
List *vacuumRelationList = VacuumRelationList(vacuumStmt, vacuumParams);

if (list_length(vacuumStmt->rels) == 0)
{

@ -105,11 +111,11 @@ PostprocessVacuumStmt(Node *node, const char *vacuumCommand)

ExecuteUnqualifiedVacuumTasks(vacuumStmt, vacuumParams);
}
else if (IsDistributedVacuumStmt(relationIdList))
else if (IsDistributedVacuumStmt(vacuumRelationList))
{
/* there is at least 1 citus table specified */

ExecuteVacuumOnDistributedTables(vacuumStmt, relationIdList,
ExecuteVacuumOnDistributedTables(vacuumStmt, vacuumRelationList,
vacuumParams);
}

@ -120,39 +126,58 @@ PostprocessVacuumStmt(Node *node, const char *vacuumCommand)

/*
* VacuumRelationIdList returns the oid of the relations in the given vacuum statement.
* VacuumRelationList returns the list of relations in the given vacuum statement,
* along with their resolved Oids (if they can be locked).
*/
static List *
VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
VacuumRelationList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
{
LOCKMODE lockMode = (vacuumParams.options & VACOPT_FULL) ? AccessExclusiveLock :
ShareUpdateExclusiveLock;

bool skipLocked = (vacuumParams.options & VACOPT_SKIP_LOCKED);

List *vacuumRelationList = ExtractVacuumTargetRels(vacuumStmt);
List *relationList = NIL;

List *relationIdList = NIL;

RangeVar *vacuumRelation = NULL;
foreach_declared_ptr(vacuumRelation, vacuumRelationList)
VacuumRelation *vacuumRelation = NULL;
foreach_declared_ptr(vacuumRelation, vacuumStmt->rels)
{
Oid relationId = InvalidOid;

/*
* If skip_locked option is enabled, we are skipping that relation
* if the lock for it is currently not available; else, we get the lock.
* if the lock for it is currently not available; otherwise, we get the lock.
*/
Oid relationId = RangeVarGetRelidExtended(vacuumRelation,
if (vacuumRelation->relation)
{
relationId = RangeVarGetRelidExtended(vacuumRelation->relation,
lockMode,
skipLocked ? RVR_SKIP_LOCKED : 0, NULL,
NULL);
}
else if (OidIsValid(vacuumRelation->oid))
{
/* fall back to the Oid directly when provided */
if (!skipLocked || ConditionalLockRelationOid(vacuumRelation->oid, lockMode))
{
if (!skipLocked)
{
LockRelationOid(vacuumRelation->oid, lockMode);
}
relationId = vacuumRelation->oid;
}
}

if (OidIsValid(relationId))
{
relationIdList = lappend_oid(relationIdList, relationId);
CitusVacuumRelation *relation = palloc(sizeof(CitusVacuumRelation));
relation->vacuumRelation = vacuumRelation;
relation->relationId = relationId;
relationList = lappend(relationList, relation);
}
}

return relationIdList;
return relationList;
}
@ -161,12 +186,13 @@ VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
* otherwise, it returns false.
*/
static bool
IsDistributedVacuumStmt(List *vacuumRelationIdList)
IsDistributedVacuumStmt(List *vacuumRelationList)
{
Oid relationId = InvalidOid;
foreach_declared_oid(relationId, vacuumRelationIdList)
CitusVacuumRelation *vacuumRelation = NULL;
foreach_declared_ptr(vacuumRelation, vacuumRelationList)
{
if (OidIsValid(relationId) && IsCitusTable(relationId))
if (OidIsValid(vacuumRelation->relationId) &&
IsCitusTable(vacuumRelation->relationId))
{
return true;
}

@ -181,24 +207,31 @@ IsDistributedVacuumStmt(List *vacuumRelationIdList)
* if they are citus tables.
*/
static void
ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationList,
CitusVacuumParams vacuumParams)
{
int relationIndex = 0;

Oid relationId = InvalidOid;
foreach_declared_oid(relationId, relationIdList)
CitusVacuumRelation *vacuumRelationEntry = NULL;
foreach_declared_ptr(vacuumRelationEntry, relationList)
{
Oid relationId = vacuumRelationEntry->relationId;
VacuumRelation *vacuumRelation = vacuumRelationEntry->vacuumRelation;

RangeVar *relation = vacuumRelation->relation;
if (relation != NULL && !relation->inh)
{
/* ONLY specified, so don't recurse to shard placements */
continue;
}

if (IsCitusTable(relationId))
{
List *vacuumColumnList = VacuumColumnList(vacuumStmt, relationIndex);
List *vacuumColumnList = vacuumRelation->va_cols;
List *taskList = VacuumTaskList(relationId, vacuumParams, vacuumColumnList);

/* local execution is not implemented for VACUUM commands */
bool localExecutionSupported = false;
ExecuteUtilityTaskList(taskList, localExecutionSupported);
}
relationIndex++;
}
}

@ -484,39 +517,6 @@ DeparseVacuumColumnNames(List *columnNameList)
}

/*
* VacuumColumnList returns list of columns from relation
* in the vacuum statement at specified relationIndex.
*/
static List *
VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex)
{
VacuumRelation *vacuumRelation = (VacuumRelation *) list_nth(vacuumStmt->rels,
relationIndex);

return vacuumRelation->va_cols;
}

/*
* ExtractVacuumTargetRels returns list of target
* relations from vacuum statement.
*/
static List *
ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
{
List *vacuumList = NIL;

VacuumRelation *vacuumRelation = NULL;
foreach_declared_ptr(vacuumRelation, vacuumStmt->rels)
{
vacuumList = lappend(vacuumList, vacuumRelation->relation);
}

return vacuumList;
}

/*
* VacuumStmtParams returns a CitusVacuumParams based on the supplied VacuumStmt.
*/
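For context, a minimal sketch of how the new CitusVacuumRelation entries can be consumed by a caller. The helper name CountCitusVacuumTargets is hypothetical and only illustrates the pairing of the parse-tree node with its resolved Oid; it is not part of the change above.

/* Hypothetical helper: count how many resolved VACUUM targets are Citus tables. */
static int
CountCitusVacuumTargets(List *vacuumRelationList)
{
	int citusTableCount = 0;

	CitusVacuumRelation *entry = NULL;
	foreach_declared_ptr(entry, vacuumRelationList)
	{
		/* relationId was resolved (and locked) by VacuumRelationList() */
		if (OidIsValid(entry->relationId) && IsCitusTable(entry->relationId))
		{
			citusTableCount++;
		}
	}

	return citusTableCount;
}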
@ -471,6 +471,13 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
appendStringInfo(&buffer, " GENERATED ALWAYS AS (%s) STORED",
defaultString);
}
#if PG_VERSION_NUM >= PG_VERSION_18
else if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
{
appendStringInfo(&buffer, " GENERATED ALWAYS AS (%s) VIRTUAL",
defaultString);
}
#endif
else
{
Oid seqOid = GetSequenceOid(tableRelationId, defaultValue->adnum);

@ -547,6 +554,13 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
appendStringInfoString(&buffer, "(");
appendStringInfoString(&buffer, checkString);
appendStringInfoString(&buffer, ")");

#if PG_VERSION_NUM >= PG_VERSION_18
if (!checkConstraint->ccenforced)
{
appendStringInfoString(&buffer, " NOT ENFORCED");
}
#endif
}

/* close create table's outer parentheses */
@ -649,13 +649,18 @@ AppendAlterTableCmdAddColumn(StringInfo buf, AlterTableCmd *alterTableCmd,
}
else if (constraint->contype == CONSTR_GENERATED)
{
char attgenerated = 's';
appendStringInfo(buf, " GENERATED %s AS (%s) STORED",
char attgenerated = ATTRIBUTE_GENERATED_STORED;
#if PG_VERSION_NUM >= PG_VERSION_18
attgenerated = constraint->generated_kind;
#endif
appendStringInfo(buf, " GENERATED %s AS (%s) %s",
GeneratedWhenStr(constraint->generated_when),
DeparseRawExprForColumnDefault(relationId, typeOid, typmod,
columnDefinition->colname,
attgenerated,
constraint->raw_expr));
constraint->raw_expr),
(attgenerated == ATTRIBUTE_GENERATED_STORED ? "STORED" :
"VIRTUAL"));
}
else if (constraint->contype == CONSTR_CHECK ||
constraint->contype == CONSTR_PRIMARY ||
@ -34,7 +34,14 @@ QualifyCreateStatisticsStmt(Node *node)
{
CreateStatsStmt *stmt = castNode(CreateStatsStmt, node);

RangeVar *relation = (RangeVar *) linitial(stmt->relations);
Node *relationNode = (Node *) linitial(stmt->relations);

if (!IsA(relationNode, RangeVar))
{
return;
}

RangeVar *relation = (RangeVar *) relationNode;

if (relation->schemaname == NULL)
{
@ -510,6 +510,12 @@ static void get_json_table_nested_columns(TableFunc *tf, JsonTablePlan *plan,
deparse_context *context,
bool showimplicit,
bool needcomma);
static void
map_var_through_join_alias(deparse_namespace *dpns, Var *v);
static Var *unwrap_simple_var(Node *expr);
static bool dpns_has_named_join(const deparse_namespace *dpns);
static inline bool
var_matches_base(const Var *v, Index want_varno, AttrNumber want_attno);

#define only_marker(rte) ((rte)->inh ? "" : "ONLY ")

@ -3804,6 +3810,8 @@ get_update_query_targetlist_def(Query *query, List *targetList,
SubLink *cur_ma_sublink;
List *ma_sublinks;

targetList = ExpandMergedSubscriptingRefEntries(targetList);

/*
* Prepare to deal with MULTIEXPR assignments: collect the source SubLinks
* into a list. We expect them to appear, in ID order, in resjunk tlist

@ -3827,6 +3835,8 @@ get_update_query_targetlist_def(Query *query, List *targetList,
}
}
}

ensure_update_targetlist_in_param_order(targetList);
}
next_ma_cell = list_head(ma_sublinks);
cur_ma_sublink = NULL;
@ -4572,6 +4582,103 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
return attname;
}

/* Any named join with joinaliasvars hides its inner aliases. */
static inline bool
dpns_has_named_join(const deparse_namespace *dpns)
{
if (!dpns || dpns->rtable == NIL)
return false;

ListCell *lc;
foreach (lc, dpns->rtable)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
if (rte && rte->rtekind == RTE_JOIN &&
rte->alias != NULL && rte->joinaliasvars != NIL)
return true;
}
return false;
}

/* Unwrap trivial wrappers around a Var; return Var* or NULL. */
static Var *
unwrap_simple_var(Node *expr)
{
for (;;)
{
if (expr == NULL)
return NULL;
if (IsA(expr, Var))
return (Var *) expr;
if (IsA(expr, RelabelType))
{ expr = (Node *) ((RelabelType *) expr)->arg; continue; }
if (IsA(expr, CoerceToDomain))
{ expr = (Node *) ((CoerceToDomain *) expr)->arg; continue; }
if (IsA(expr, CollateExpr))
{ expr = (Node *) ((CollateExpr *) expr)->arg; continue; }
/* Not a simple Var */
return NULL;
}
}

/* Base identity (canonical/synonym) match against a wanted (varno,attno) pair. */
static inline bool
var_matches_base(const Var *v, Index want_varno, AttrNumber want_attno)
{
if (v->varlevelsup != 0)
return false;
if (v->varno == want_varno && v->varattno == want_attno)
return true;
if (v->varnosyn > 0 && v->varattnosyn > 0 &&
v->varnosyn == want_varno && v->varattnosyn == want_attno)
return true;
return false;
}

/* Mutate v in place: if v maps to a named JOIN's column, set varnosyn/varattnosyn.
* Only used for query deparse (dpns->plan == NULL). */
static void
map_var_through_join_alias(deparse_namespace *dpns, Var *v)
{
if (!dpns || dpns->plan != NULL || !v ||
v->varlevelsup != 0 || v->varattno <= 0)
return;

int rti = 0;
ListCell *lc;
foreach (lc, dpns->rtable)
{
rti++;
RangeTblEntry *jrte = (RangeTblEntry *) lfirst(lc);
if (!jrte || jrte->rtekind != RTE_JOIN ||
jrte->alias == NULL || jrte->joinaliasvars == NIL)
continue;

AttrNumber jattno = 0;
ListCell *vlc;
foreach (vlc, jrte->joinaliasvars)
{
jattno++;
Var *aliasVar = unwrap_simple_var((Node *) lfirst(vlc));
if (!aliasVar)
continue;

if (var_matches_base(aliasVar, v->varno, v->varattno))
{
v->varnosyn = (Index) rti;
v->varattnosyn = jattno;
return;
}
}
}
}

/*
* Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This
* routine is actually a callback for get_special_varno, which handles finding
@ -6586,6 +6693,11 @@ get_rule_expr(Node *node, deparse_context *context,
Assert(list_length(rowexpr->args) <= tupdesc->natts);
}

/* Precompute deparse ns and whether we even need to try mapping */
deparse_namespace *dpns = (context->namespaces != NIL)
? (deparse_namespace *) linitial(context->namespaces) : NULL;
bool try_map = (dpns && dpns->plan == NULL && dpns_has_named_join(dpns));

/*
* SQL99 allows "ROW" to be omitted when there is more than
* one column, but for simplicity we always print it.

@ -6601,6 +6713,17 @@
!TupleDescAttr(tupdesc, i)->attisdropped)
{
appendStringInfoString(buf, sep);

/* PG18: if element is a simple base Var, set its SYN to the JOIN alias */
if (try_map)
{
Var *v = unwrap_simple_var(e);
if (v)
{
map_var_through_join_alias(dpns, v);
}
}

/* Whole-row Vars need special treatment here */
get_rule_expr_toplevel(e, context, true);
sep = ", ";
@ -43,6 +43,8 @@ PG_FUNCTION_INFO_V1(citus_promote_clone_and_rebalance);
Datum
citus_promote_clone_and_rebalance(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);

/* Ensure superuser and coordinator */
EnsureSuperUser();
EnsureCoordinator();
@ -13,6 +13,7 @@
#include "postgres.h"

#include "funcapi.h"
#include "miscadmin.h"

#include "access/htup_details.h"
#include "access/xact.h"

@ -124,8 +125,10 @@ static void AdjustReadIntermediateResultsCostInternal(RelOptInfo *relOptInfo,
Const *resultFormatConst);
static List * OuterPlanParamsList(PlannerInfo *root);
static List * CopyPlanParamList(List *originalPlanParamList);
static PlannerRestrictionContext * CreateAndPushPlannerRestrictionContext(
FastPathRestrictionContext *fastPathContext);
static void CreateAndPushPlannerRestrictionContext(
DistributedPlanningContext *planContext,
FastPathRestrictionContext *
fastPathContext);
static PlannerRestrictionContext * CurrentPlannerRestrictionContext(void);
static void PopPlannerRestrictionContext(void);
static void ResetPlannerRestrictionContext(

@ -144,6 +147,9 @@ static void ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan,
static bool CheckPostPlanDistribution(DistributedPlanningContext *planContext,
bool isDistributedQuery,
List *rangeTableList);
#if PG_VERSION_NUM >= PG_VERSION_18
static int DisableSelfJoinElimination(void);
#endif

/* Distributed planner hook */
PlannedStmt *

@ -155,6 +161,9 @@ distributed_planner(Query *parse,
bool needsDistributedPlanning = false;
bool fastPathRouterQuery = false;
FastPathRestrictionContext fastPathContext = { 0 };
#if PG_VERSION_NUM >= PG_VERSION_18
int saveNestLevel = -1;
#endif

List *rangeTableList = ExtractRangeTableEntryList(parse);

@ -218,6 +227,10 @@ distributed_planner(Query *parse,
bool setPartitionedTablesInherited = false;
AdjustPartitioningForDistributedPlanning(rangeTableList,
setPartitionedTablesInherited);

#if PG_VERSION_NUM >= PG_VERSION_18
saveNestLevel = DisableSelfJoinElimination();
#endif
}
}

@ -234,9 +247,9 @@ distributed_planner(Query *parse,
*/
HideCitusDependentObjectsOnQueriesOfPgMetaTables((Node *) parse, NULL);

/* create a restriction context and put it at the end of context list */
planContext.plannerRestrictionContext = CreateAndPushPlannerRestrictionContext(
&fastPathContext);
/* create a restriction context and put it at the end of our plan context's context list */
CreateAndPushPlannerRestrictionContext(&planContext,
&fastPathContext);

/*
* We keep track of how many times we've recursed into the planner, primarily

@ -264,6 +277,16 @@ distributed_planner(Query *parse,
planContext.plan = standard_planner(planContext.query, NULL,
planContext.cursorOptions,
planContext.boundParams);
#if PG_VERSION_NUM >= PG_VERSION_18
if (needsDistributedPlanning)
{
Assert(saveNestLevel > 0);
AtEOXact_GUC(true, saveNestLevel);
}

/* Pop the plan context from the current restriction context */
planContext.plannerRestrictionContext->planContext = NULL;
#endif
needsDistributedPlanning = CheckPostPlanDistribution(&planContext,
needsDistributedPlanning,
rangeTableList);
@ -2015,6 +2038,32 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
lappend(relationRestrictionContext->relationRestrictionList, relationRestriction);

MemoryContextSwitchTo(oldMemoryContext);

#if PG_VERSION_NUM >= PG_VERSION_18
if (root->query_level == 1 && plannerRestrictionContext->planContext != NULL)
{
/* We're at the top query with a distributed context; see if Postgres
* has changed the query tree we passed to it in distributed_planner().
* This check was necessitated by PG commit 1e4351a, because in it the
* planner modifies a copy of the passed in query tree with the consequence
* that changes are not reflected back to the caller of standard_planner().
*/
Query *query = plannerRestrictionContext->planContext->query;
if (root->parse != query)
{
/*
* The Postgres planner has reconstructed the query tree, so the query
* tree our distributed context passed in to standard_planner() is
* updated to track the new query tree.
*/
ereport(DEBUG4, (errmsg(
"Detected query reconstruction by Postgres planner, updating "
"planContext to track it")));

plannerRestrictionContext->planContext->query = root->parse;
}
}
#endif
}
@ -2392,11 +2441,13 @@ CopyPlanParamList(List *originalPlanParamList)
* context with an empty relation restriction context and an empty join and
* a copy of the given fast path restriction context (if present). Finally,
* the planner restriction context is inserted to the beginning of the
* global plannerRestrictionContextList and it is returned.
* global plannerRestrictionContextList and, in PG18+, given a reference to
* its distributed plan context.
*/
static PlannerRestrictionContext *
CreateAndPushPlannerRestrictionContext(
FastPathRestrictionContext *fastPathRestrictionContext)
static void
CreateAndPushPlannerRestrictionContext(DistributedPlanningContext *planContext,
FastPathRestrictionContext *
fastPathRestrictionContext)
{
PlannerRestrictionContext *plannerRestrictionContext =
palloc0(sizeof(PlannerRestrictionContext));

@ -2433,7 +2484,11 @@ CreateAndPushPlannerRestrictionContext(
plannerRestrictionContextList = lcons(plannerRestrictionContext,
plannerRestrictionContextList);

return plannerRestrictionContext;
planContext->plannerRestrictionContext = plannerRestrictionContext;

#if PG_VERSION_NUM >= PG_VERSION_18
plannerRestrictionContext->planContext = planContext;
#endif
}

@ -2494,6 +2549,18 @@ CurrentPlannerRestrictionContext(void)
static void
PopPlannerRestrictionContext(void)
{
#if PG_VERSION_NUM >= PG_VERSION_18

/*
* PG18+: Clear the restriction context's planContext pointer; this is done
* by distributed_planner() when popping the context, but in case of error
* during standard_planner() we want to clean up here also.
*/
PlannerRestrictionContext *plannerRestrictionContext =
(PlannerRestrictionContext *) linitial(plannerRestrictionContextList);
plannerRestrictionContext->planContext = NULL;
#endif

plannerRestrictionContextList = list_delete_first(plannerRestrictionContextList);
}
@ -2791,3 +2858,27 @@ CheckPostPlanDistribution(DistributedPlanningContext *planContext, bool

return isDistributedQuery;
}

#if PG_VERSION_NUM >= PG_VERSION_18

/*
* DisableSelfJoinElimination is used to prevent self join elimination
* during distributed query planning to ensure shard queries are correctly
* generated. PG18's self join elimination (fc069a3a6) changes the Query
* in a way that can cause problems for queries with a mix of Citus and
* Postgres tables. Self join elimination is allowed on Postgres tables
* only so queries involving shards get the benefit of it.
*/
static int
DisableSelfJoinElimination(void)
{
int NestLevel = NewGUCNestLevel();
set_config_option("enable_self_join_elimination", "off",
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
return NestLevel;
}

#endif
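The GUC change above is scoped with PostgreSQL's nest-level mechanism rather than a manual save/restore. A minimal sketch of the overall pattern as the caller (distributed_planner) uses it; variable names are illustrative:

/* remember the current GUC nesting level before the local override */
int gucNestLevel = NewGUCNestLevel();
set_config_option("enable_self_join_elimination", "off",
                  (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
                  GUC_ACTION_LOCAL, true, 0, false);

/* ... run standard_planner() while the setting is in effect ... */

/* roll every GUC changed at or below that nest level back to its prior value */
AtEOXact_GUC(true, gucNestLevel);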
@ -273,8 +273,16 @@ FastPathRouterQuery(Query *query, FastPathRestrictionContext *fastPathContext)
return true;
}

/* make sure that the only range table in FROM clause */
if (list_length(query->rtable) != 1)
int numFromRels = list_length(query->rtable);

/* make sure that there is only one range table in FROM clause */
if ((numFromRels != 1)
#if PG_VERSION_NUM >= PG_VERSION_18

/* with a PG18+ twist for GROUP rte - if present make sure there's two range tables */
&& (!query->hasGroupRTE || numFromRels != 2)
#endif
)
{
return false;
}
@ -428,11 +428,10 @@ CreateInsertSelectIntoLocalTablePlan(uint64 planId, Query *insertSelectQuery,
ParamListInfo boundParams, bool hasUnresolvedParams,
PlannerRestrictionContext *plannerRestrictionContext)
{
RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery);

PrepareInsertSelectForCitusPlanner(insertSelectQuery);

/* get the SELECT query (may have changed after PrepareInsertSelectForCitusPlanner) */
RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery);
Query *selectQuery = selectRte->subquery;

bool allowRecursivePlanning = true;

@ -513,6 +512,13 @@ PrepareInsertSelectForCitusPlanner(Query *insertSelectQuery)

bool isWrapped = false;

/*
* PG18 is stricter about GroupRTE/GroupVar. For INSERT … SELECT with a GROUP BY,
* flatten the SELECT’s targetList and havingQual so Vars point to base RTEs and
* avoid "unrecognized range table id" errors.
*/
FlattenGroupExprs(selectRte->subquery);

if (selectRte->subquery->setOperations != NULL)
{
/*

@ -1431,11 +1437,6 @@ static DistributedPlan *
CreateNonPushableInsertSelectPlan(uint64 planId, Query *parse, ParamListInfo boundParams)
{
Query *insertSelectQuery = copyObject(parse);

RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery);
RangeTblEntry *insertRte = ExtractResultRelationRTEOrError(insertSelectQuery);
Oid targetRelationId = insertRte->relid;

DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);
distributedPlan->modLevel = RowModifyLevelForQuery(insertSelectQuery);

@ -1450,6 +1451,7 @@ CreateNonPushableInsertSelectPlan(uint64 planId, Query *parse, ParamListInfo bou
PrepareInsertSelectForCitusPlanner(insertSelectQuery);

/* get the SELECT query (may have changed after PrepareInsertSelectForCitusPlanner) */
RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery);
Query *selectQuery = selectRte->subquery;

/*

@ -1472,6 +1474,9 @@ CreateNonPushableInsertSelectPlan(uint64 planId, Query *parse, ParamListInfo bou
PlannedStmt *selectPlan = pg_plan_query(selectQueryCopy, NULL, cursorOptions,
boundParams);

/* decide whether we can repartition the results */
RangeTblEntry *insertRte = ExtractResultRelationRTEOrError(insertSelectQuery);
Oid targetRelationId = insertRte->relid;
bool repartitioned = IsRedistributablePlan(selectPlan->planTree) &&
IsSupportedRedistributionTarget(targetRelationId);
@ -41,6 +41,7 @@
static int SourceResultPartitionColumnIndex(Query *mergeQuery,
List *sourceTargetList,
CitusTableCacheEntry *targetRelation);
static int FindTargetListEntryWithVarExprAttno(List *targetList, AttrNumber varattno);
static Var * ValidateAndReturnVarIfSupported(Node *entryExpr);
static DeferredErrorMessage * DeferErrorIfTargetHasFalseClause(Oid targetRelationId,
PlannerRestrictionContext *

@ -422,10 +423,13 @@ ErrorIfMergeHasUnsupportedTables(Oid targetRelationId, List *rangeTableList)
case RTE_VALUES:
case RTE_JOIN:
case RTE_CTE:
{
/* Skip them as base table(s) will be checked */
continue;
}
#if PG_VERSION_NUM >= PG_VERSION_18
case RTE_GROUP:
#endif
{
/* Skip them as base table(s) will be checked */
continue;
}

/*
* RTE_NAMEDTUPLESTORE is typically used in ephemeral named relations,

@ -628,6 +632,22 @@ MergeQualAndTargetListFunctionsSupported(Oid resultRelationId, Query *query,
}
}

/*
* joinTree->quals, retrieved by GetMergeJoinTree() - either from
* mergeJoinCondition (PG >= 17) or jointree->quals (PG < 17),
* only contains the quals that are present in the "ON (..)" clause. Action
* quals that can be specified for each specific action, as in
* "WHEN <match condition> AND <action quals> THEN <action>", are
* saved into the "qual" field of the corresponding action's entry in
* mergeActionList, see
* https://github.com/postgres/postgres/blob/e6da68a6e1d60a037b63a9c9ed36e5ef0a996769/src/backend/parser/parse_merge.c#L285-L293.
*
* For this reason, even if TargetEntryChangesValue() could prove that
* an action's quals ensure that the action cannot change the distribution
* key, this is not the case as we don't provide action quals to
* TargetEntryChangesValue(), but just joinTree, which only contains
* the "ON (..)" clause quals.
*/
if (targetEntryDistributionColumn &&
TargetEntryChangesValue(targetEntry, distributionColumn, joinTree))
{

@ -1411,7 +1431,8 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
Assert(sourceRepartitionVar);

int sourceResultRepartitionColumnIndex =
DistributionColumnIndex(sourceTargetList, sourceRepartitionVar);
FindTargetListEntryWithVarExprAttno(sourceTargetList,
sourceRepartitionVar->varattno);

if (sourceResultRepartitionColumnIndex == -1)
{
@ -1562,6 +1583,33 @@ FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query)
}

/*
* FindTargetListEntryWithVarExprAttno finds the index of the target
* entry whose expr is a Var that points to input varattno.
*
* If no such target entry is found, it returns -1.
*/
static int
FindTargetListEntryWithVarExprAttno(List *targetList, AttrNumber varattno)
{
int targetEntryIndex = 0;

TargetEntry *targetEntry = NULL;
foreach_declared_ptr(targetEntry, targetList)
{
if (IsA(targetEntry->expr, Var) &&
((Var *) targetEntry->expr)->varattno == varattno)
{
return targetEntryIndex;
}

targetEntryIndex++;
}

return -1;
}

/*
* IsLocalTableModification returns true if the table modified is a Postgres table.
* We do not support recursive planning for MERGE yet, so we could have a join
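A short usage sketch of the new helper, assuming a source target list whose entries are plain Vars (as the MERGE repartitioning path expects). The variable names and the error message are illustrative only:

/* find the target-list position of the source column feeding the distribution key */
int columnIndex = FindTargetListEntryWithVarExprAttno(sourceTargetList,
                                                      sourceRepartitionVar->varattno);
if (columnIndex == -1)
{
	/* no plain Var in the source target list points at that attribute */
	ereport(ERROR, (errmsg("cannot determine source repartition column")));
}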
@ -149,13 +149,6 @@ typedef struct ExplainAnalyzeDestination

#if PG_VERSION_NUM >= PG_VERSION_17 && PG_VERSION_NUM < PG_VERSION_18

/*
* Various places within need to convert bytes to kilobytes. Round these up
* to the next whole kilobyte.
* copied from explain.c
*/
#define BYTES_TO_KILOBYTES(b) (((b) + 1023) / 1024)

/* copied from explain.c */
/* Instrumentation data for SERIALIZE option */
typedef struct SerializeMetrics

@ -166,13 +159,7 @@ typedef struct SerializeMetrics
} SerializeMetrics;

/* copied from explain.c */
static bool peek_buffer_usage(ExplainState *es, const BufferUsage *usage);
static void show_buffer_usage(ExplainState *es, const BufferUsage *usage);
static void show_memory_counters(ExplainState *es,
const MemoryContextCounters *mem_counters);
static void ExplainIndentText(ExplainState *es);
static void ExplainPrintSerialize(ExplainState *es,
SerializeMetrics *metrics);
static SerializeMetrics GetSerializationMetrics(DestReceiver *dest);

/*

@ -200,6 +187,23 @@ typedef struct SerializeDestReceiver
} SerializeDestReceiver;
#endif

#if PG_VERSION_NUM >= PG_VERSION_17

/*
* Various places within need to convert bytes to kilobytes. Round these up
* to the next whole kilobyte.
* copied from explain.c
*/
#define BYTES_TO_KILOBYTES(b) (((b) + 1023) / 1024)

/* copied from explain.c */
static bool peek_buffer_usage(ExplainState *es, const BufferUsage *usage);
static void show_buffer_usage(ExplainState *es, const BufferUsage *usage);
static void show_memory_counters(ExplainState *es,
const MemoryContextCounters *mem_counters);
static void ExplainPrintSerialize(ExplainState *es,
SerializeMetrics *metrics);
#endif

/* Explain functions for distributed queries */
static void ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es);
@ -2409,7 +2413,7 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DistributedSubPlan *subPlan, DestRec
/* Create textual dump of plan tree */
ExplainPrintPlan(es, queryDesc);

#if PG_VERSION_NUM >= PG_VERSION_17 && PG_VERSION_NUM < PG_VERSION_18
#if PG_VERSION_NUM >= PG_VERSION_17
/* Show buffer and/or memory usage in planning */
if (peek_buffer_usage(es, bufusage) || mem_counters)
{

@ -2455,7 +2459,7 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DistributedSubPlan *subPlan, DestRec
if (es->costs)
ExplainPrintJITSummary(es, queryDesc);

#if PG_VERSION_NUM >= PG_VERSION_17 && PG_VERSION_NUM < PG_VERSION_18
#if PG_VERSION_NUM >= PG_VERSION_17
if (es->serialize != EXPLAIN_SERIALIZE_NONE)
{
/* the SERIALIZE option requires its own tuple receiver */
@ -2530,6 +2534,50 @@ elapsed_time(instr_time *starttime)

#if PG_VERSION_NUM >= PG_VERSION_17 && PG_VERSION_NUM < PG_VERSION_18
/*
* Indent a text-format line.
*
* We indent by two spaces per indentation level. However, when emitting
* data for a parallel worker there might already be data on the current line
* (cf. ExplainOpenWorker); in that case, don't indent any more.
*
* Copied from explain.c.
*/
static void
ExplainIndentText(ExplainState *es)
{
Assert(es->format == EXPLAIN_FORMAT_TEXT);
if (es->str->len == 0 || es->str->data[es->str->len - 1] == '\n')
appendStringInfoSpaces(es->str, es->indent * 2);
}

/*
* GetSerializationMetrics - collect metrics
*
* We have to be careful here since the receiver could be an IntoRel
* receiver if the subject statement is CREATE TABLE AS. In that
* case, return all-zeroes stats.
*
* Copied from explain.c.
*/
static SerializeMetrics
GetSerializationMetrics(DestReceiver *dest)
{
SerializeMetrics empty;

if (dest->mydest == DestExplainSerialize)
return ((SerializeDestReceiver *) dest)->metrics;

memset(&empty, 0, sizeof(SerializeMetrics));
INSTR_TIME_SET_ZERO(empty.timeSpent);

return empty;
}
#endif

#if PG_VERSION_NUM >= PG_VERSION_17
/*
* Return whether show_buffer_usage would have anything to print, if given
* the same 'usage' data. Note that when the format is anything other than
@ -2747,24 +2795,6 @@ show_buffer_usage(ExplainState *es, const BufferUsage *usage)
}

/*
* Indent a text-format line.
*
* We indent by two spaces per indentation level. However, when emitting
* data for a parallel worker there might already be data on the current line
* (cf. ExplainOpenWorker); in that case, don't indent any more.
*
* Copied from explain.c.
*/
static void
ExplainIndentText(ExplainState *es)
{
Assert(es->format == EXPLAIN_FORMAT_TEXT);
if (es->str->len == 0 || es->str->data[es->str->len - 1] == '\n')
appendStringInfoSpaces(es->str, es->indent * 2);
}

/*
* Show memory usage details.
*

@ -2850,28 +2880,4 @@ ExplainPrintSerialize(ExplainState *es, SerializeMetrics *metrics)

ExplainCloseGroup("Serialization", "Serialization", true, es);
}

/*
* GetSerializationMetrics - collect metrics
*
* We have to be careful here since the receiver could be an IntoRel
* receiver if the subject statement is CREATE TABLE AS. In that
* case, return all-zeroes stats.
*
* Copied from explain.c.
*/
static SerializeMetrics
GetSerializationMetrics(DestReceiver *dest)
{
SerializeMetrics empty;

if (dest->mydest == DestExplainSerialize)
return ((SerializeDestReceiver *) dest)->metrics;

memset(&empty, 0, sizeof(SerializeMetrics));
INSTR_TIME_SET_ZERO(empty.timeSpent);

return empty;
}
#endif
@ -297,8 +297,19 @@ TargetListOnPartitionColumn(Query *query, List *targetEntryList)
bool
FindNodeMatchingCheckFunctionInRangeTableList(List *rtable, CheckNodeFunc checker)
{
int rtWalkFlags = QTW_EXAMINE_RTES_BEFORE;

#if PG_VERSION_NUM >= PG_VERSION_18

/*
* PG18+: Do not descend into GROUP BY expressions subqueries, they
* have already been visited as recursive planning is depth-first.
*/
rtWalkFlags |= QTW_IGNORE_GROUPEXPRS;
#endif

return range_table_walker(rtable, FindNodeMatchingCheckFunction, checker,
QTW_EXAMINE_RTES_BEFORE);
rtWalkFlags);
}
@ -2501,11 +2501,16 @@ ErrorIfUnsupportedShardDistribution(Query *query)
currentRelationId);
if (!coPartitionedTables)
{
char *firstRelName = get_rel_name(firstTableRelationId);
char *currentRelName = get_rel_name(currentRelationId);
int compareResult = strcmp(firstRelName, currentRelName);

ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot push down this subquery"),
errdetail("%s and %s are not colocated",
get_rel_name(firstTableRelationId),
get_rel_name(currentRelationId))));
(compareResult > 0 ? currentRelName : firstRelName),
(compareResult > 0 ? firstRelName :
currentRelName))));
}
}
}
@ -3173,16 +3178,25 @@ BuildBaseConstraint(Var *column)

/*
* MakeOpExpression builds an operator expression node. This operator expression
* implements the operator clause as defined by the variable and the strategy
* number.
* MakeOpExpressionExtended builds an operator expression node that's of
* the form "Var <op> Expr", where Expr must either be a Const or a Var
* (*1).
*
* This operator expression implements the operator clause as defined by
* the variable and the strategy number.
*/
OpExpr *
MakeOpExpression(Var *variable, int16 strategyNumber)
MakeOpExpressionExtended(Var *leftVar, Expr *rightArg, int16 strategyNumber)
{
Oid typeId = variable->vartype;
Oid typeModId = variable->vartypmod;
Oid collationId = variable->varcollid;
/*
* Other types of expressions are probably also fine to be used, but
* none of the callers need support for them for now, so we haven't
* tested them (*1).
*/
Assert(IsA(rightArg, Const) || IsA(rightArg, Var));

Oid typeId = leftVar->vartype;
Oid collationId = leftVar->varcollid;

Oid accessMethodId = BTREE_AM_OID;

@ -3200,18 +3214,16 @@ MakeOpExpression(Var *variable, int16 strategyNumber)
*/
if (operatorClassInputType != typeId && typeType != TYPTYPE_PSEUDO)
{
variable = (Var *) makeRelabelType((Expr *) variable, operatorClassInputType,
-1, collationId, COERCE_IMPLICIT_CAST);
leftVar = (Var *) makeRelabelType((Expr *) leftVar, operatorClassInputType,
-1, collationId, COERCE_IMPLICIT_CAST);
}

Const *constantValue = makeNullConst(operatorClassInputType, typeModId, collationId);

/* Now make the expression with the given variable and a null constant */
OpExpr *expression = (OpExpr *) make_opclause(operatorId,
InvalidOid, /* no result type yet */
false, /* no return set */
(Expr *) variable,
(Expr *) constantValue,
(Expr *) leftVar,
rightArg,
InvalidOid, collationId);

/* Set implementing function id and result type */

@ -3222,6 +3234,31 @@ MakeOpExpression(Var *variable, int16 strategyNumber)
}

/*
* MakeOpExpression is a wrapper around MakeOpExpressionExtended
* that creates a null constant of the appropriate type for the right
* hand side (the operator class input type). As a result, it builds an
* operator expression node that's of the form "Var <op> NULL".
*/
OpExpr *
MakeOpExpression(Var *leftVar, int16 strategyNumber)
{
Oid typeId = leftVar->vartype;
Oid typeModId = leftVar->vartypmod;
Oid collationId = leftVar->varcollid;

Oid accessMethodId = BTREE_AM_OID;

OperatorCacheEntry *operatorCacheEntry = LookupOperatorByType(typeId, accessMethodId,
strategyNumber);
Oid operatorClassInputType = operatorCacheEntry->operatorClassInputType;

Const *constantValue = makeNullConst(operatorClassInputType, typeModId, collationId);

return MakeOpExpressionExtended(leftVar, (Expr *) constantValue, strategyNumber);
}

/*
* LookupOperatorByType is a wrapper around GetOperatorByType(),
* operatorClassInputType() and get_typtype() functions that uses a cache to avoid
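As a usage illustration, a hedged sketch of building a "column = otherVar" equality clause with the new extended helper and then asking whether the existing WHERE quals already imply it; this mirrors how the TargetEntryChangesValue change further below uses it, and the variable names are illustrative:

/* build "column = newValue" using the column's btree equality operator */
OpExpr *equalityExpr = MakeOpExpressionExtended(column, (Expr *) newValue,
                                                BTEqualStrategyNumber);

/* collect the restriction clauses of the join tree */
List *restrictClauseList = WhereClauseList(joinTree);

/* true when the quals already guarantee column = newValue */
bool implied = predicate_implied_by(list_make1(equalityExpr),
                                    restrictClauseList, false);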
@ -372,6 +372,25 @@ AddPartitionKeyNotNullFilterToSelect(Query *subqery)
/* we should have found target partition column */
Assert(targetPartitionColumnVar != NULL);

#if PG_VERSION_NUM >= PG_VERSION_18
if (subqery->hasGroupRTE)
{
/* if the partition column is a grouped column, we need to flatten it
* to ensure query deparsing works correctly. We choose to do this here
* instead of in ruleutils.c because we want to keep the flattening logic
* close to the NOT NULL filter injection.
*/
RangeTblEntry *partitionRTE = rt_fetch(targetPartitionColumnVar->varno,
subqery->rtable);
if (partitionRTE->rtekind == RTE_GROUP)
{
targetPartitionColumnVar = (Var *) flatten_group_exprs(NULL, subqery,
(Node *)
targetPartitionColumnVar);
}
}
#endif

/* create expression for partition_column IS NOT NULL */
NullTest *nullTest = makeNode(NullTest);
nullTest->nulltesttype = IS_NOT_NULL;
@ -1609,10 +1628,19 @@ MasterIrreducibleExpressionFunctionChecker(Oid func_id, void *context)

/*
* TargetEntryChangesValue determines whether the given target entry may
* change the value in a given column, given a join tree. The result is
* true unless the expression refers directly to the column, or the
* expression is a value that is implied by the qualifiers of the join
* tree, or the target entry sets a different column.
* change the value given a column and a join tree.
*
* The function assumes that the "targetEntry" references given "column"
* Var via its "resname" and is used as part of a modify query. This means
* that, for example, for an update query, the input "targetEntry" constructs
* the following assignment operation as part of the SET clause:
* "col_a = expr_a ", where, "col_a" refers to input "column" Var (via
* "resname") as per the assumption written above. And we want to understand
* if "expr_a" (which is pointed to by targetEntry->expr) refers directly to
* the "column" Var, or "expr_a" is a value that is implied to be equal
* to "column" Var by the qualifiers of the join tree. If so, we know that
* the value of "col_a" effectively cannot be changed by this assignment
* operation.
*/
bool
TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTree)

@ -1623,11 +1651,36 @@ TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTre
if (IsA(setExpr, Var))
{
Var *newValue = (Var *) setExpr;
if (newValue->varattno == column->varattno)
if (column->varno == newValue->varno &&
column->varattno == newValue->varattno)
{
/* target entry of the form SET col = table.col */
/*
* Target entry is of the form "SET col_a = foo.col_b",
* where foo also points to the same range table entry
* and col_a and col_b are the same. So, effectively
* they're literally referring to the same column.
*/
isColumnValueChanged = false;
}
else
{
List *restrictClauseList = WhereClauseList(joinTree);
OpExpr *equalityExpr = MakeOpExpressionExtended(column, (Expr *) newValue,
BTEqualStrategyNumber);

bool predicateIsImplied = predicate_implied_by(list_make1(equalityExpr),
restrictClauseList, false);
if (predicateIsImplied)
{
/*
* Target entry is of the form
* "SET col_a = foo.col_b WHERE col_a = foo.col_b (AND (...))",
* where foo points to a different relation or it points
* to the same relation but col_a is not the same column as col_b.
*/
isColumnValueChanged = false;
}
}
}
else if (IsA(setExpr, Const))
{

@ -1648,7 +1701,10 @@ TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTre
restrictClauseList, false);
if (predicateIsImplied)
{
/* target entry of the form SET col = <x> WHERE col = <x> AND ... */
/*
* Target entry is of the form
* "SET col_a = const_a WHERE col_a = const_a (AND (...))".
*/
isColumnValueChanged = false;
}
}

@ -2136,7 +2192,11 @@ CheckAndBuildDelayedFastPathPlan(DistributedPlanningContext *planContext,
static bool
ConvertToQueryOnShard(Query *query, Oid citusTableOid, Oid shardId)
{
Assert(list_length(query->rtable) == 1);
Assert(list_length(query->rtable) == 1
#if PG_VERSION_NUM >= PG_VERSION_18
|| (list_length(query->rtable) == 2 && query->hasGroupRTE)
#endif
);
RangeTblEntry *citusTableRte = (RangeTblEntry *) linitial(query->rtable);
Assert(citusTableRte->relid == citusTableOid);
@ -333,7 +333,9 @@ WhereOrHavingClauseContainsSubquery(Query *query)
bool
TargetListContainsSubquery(List *targetList)
{
return FindNodeMatchingCheckFunction((Node *) targetList, IsNodeSubquery);
bool hasSubquery = FindNodeMatchingCheckFunction((Node *) targetList, IsNodeSubquery);

return hasSubquery;
}

@ -1093,6 +1095,28 @@ DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLi
}

/*
* FlattenGroupExprs flattens the GROUP BY expressions in the query tree
* by replacing VAR nodes referencing the GROUP range table with the actual
* GROUP BY expression. This is used by Citus planning to ensure correctness
* when analysing and building the distributed plan.
*/
void
FlattenGroupExprs(Query *queryTree)
{
#if PG_VERSION_NUM >= PG_VERSION_18
if (queryTree->hasGroupRTE)
{
queryTree->targetList = (List *)
flatten_group_exprs(NULL, queryTree,
(Node *) queryTree->targetList);
queryTree->havingQual =
flatten_group_exprs(NULL, queryTree, queryTree->havingQual);
}
#endif
}

/*
* DeferErrorIfSubqueryRequiresMerge returns a deferred error if the subquery
* requires a merge step on the coordinator (e.g. limit, group by non-distribution

@ -1953,6 +1977,13 @@ static MultiNode *
SubqueryPushdownMultiNodeTree(Query *originalQuery)
{
Query *queryTree = copyObject(originalQuery);

/*
* PG18+ needs to flatten GROUP BY expressions to ensure correct processing
* later on, such as identification of partition columns in GROUP BY.
*/
FlattenGroupExprs(queryTree);

List *targetEntryList = queryTree->targetList;
MultiCollect *subqueryCollectNode = CitusMakeNode(MultiCollect);

@ -2029,7 +2060,9 @@ SubqueryPushdownMultiNodeTree(Query *originalQuery)
pushedDownQuery->setOperations = copyObject(queryTree->setOperations);
pushedDownQuery->querySource = queryTree->querySource;
pushedDownQuery->hasSubLinks = queryTree->hasSubLinks;

#if PG_VERSION_NUM >= PG_VERSION_18
pushedDownQuery->hasGroupRTE = queryTree->hasGroupRTE;
#endif
MultiTable *subqueryNode = MultiSubqueryPushdownTable(pushedDownQuery);

SetChild((MultiUnaryNode *) subqueryCollectNode, (MultiNode *) subqueryNode);
@ -97,6 +97,7 @@
#include "distributed/version_compat.h"

bool EnableRecurringOuterJoinPushdown = true;
bool EnableOuterJoinsWithPseudoconstantQualsPrePG17 = false;

/*
* RecursivePlanningContext is used to recursively plan subqueries

@ -260,7 +261,6 @@ GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery,
*/
context.allDistributionKeysInQueryAreEqual =
AllDistributionKeysInQueryAreEqual(originalQuery, plannerRestrictionContext);

DeferredErrorMessage *error = RecursivelyPlanSubqueriesAndCTEs(originalQuery,
&context);
if (error != NULL)

@ -509,7 +509,7 @@ ShouldRecursivelyPlanOuterJoins(Query *query, RecursivePlanningContext *context)
bool hasOuterJoin =
context->plannerRestrictionContext->joinRestrictionContext->hasOuterJoin;
#if PG_VERSION_NUM < PG_VERSION_17
if (!hasOuterJoin)
if (!EnableOuterJoinsWithPseudoconstantQualsPrePG17 && !hasOuterJoin)
{
/*
* PG15 commit d1ef5631e620f9a5b6480a32bb70124c857af4f1

@ -1122,14 +1122,10 @@ ExtractSublinkWalker(Node *node, List **sublinkList)
static bool
ShouldRecursivelyPlanSublinks(Query *query)
{
if (FindNodeMatchingCheckFunctionInRangeTableList(query->rtable,
IsDistributedTableRTE))
{
/* there is a distributed table in the FROM clause */
return false;
}

return true;
bool hasDistributedTable = (FindNodeMatchingCheckFunctionInRangeTableList(
query->rtable,
IsDistributedTableRTE));
return !hasDistributedTable;
}
@ -971,6 +971,40 @@ GetVarFromAssignedParam(List *outerPlanParamsList, Param *plannerParam,
}
}

#if PG_VERSION_NUM >= PG_VERSION_18

/*
* In PG18+, the dereferenced PARAM node could be a GroupVar if the
* query has a GROUP BY. In that case, we need to make an extra
* hop to get the underlying Var from the grouping expressions.
*/
if (assignedVar != NULL)
{
Query *parse = (*rootContainingVar)->parse;
if (parse->hasGroupRTE)
{
RangeTblEntry *rte = rt_fetch(assignedVar->varno, parse->rtable);
if (rte->rtekind == RTE_GROUP)
{
Assert(assignedVar->varattno >= 1 &&
assignedVar->varattno <= list_length(rte->groupexprs));
Node *groupVar = list_nth(rte->groupexprs, assignedVar->varattno - 1);
if (IsA(groupVar, Var))
{
assignedVar = (Var *) groupVar;
}
else
{
/* todo: handle PlaceHolderVar case if needed */
ereport(DEBUG2, (errmsg(
"GroupVar maps to non-Var group expr; bailing out")));
assignedVar = NULL;
}
}
}
}
#endif

return assignedVar;
}

@ -2431,7 +2465,7 @@ FilterJoinRestrictionContext(JoinRestrictionContext *joinRestrictionContext, Rel

/*
* RangeTableArrayContainsAnyRTEIdentities returns true if any of the range table entries
* int rangeTableEntries array is an range table relation specified in queryRteIdentities.
* in rangeTableEntries array is a range table relation specified in queryRteIdentities.
*/
static bool
RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int

@ -2444,6 +2478,18 @@ RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int
List *rangeTableRelationList = NULL;
ListCell *rteRelationCell = NULL;

#if PG_VERSION_NUM >= PG_VERSION_18

/*
* In PG18+, planner array simple_rte_array may contain NULL entries
* for "dead relations". See PG commits 5f6f951 and e9a20e4 for details.
*/
if (rangeTableEntry == NULL)
{
continue;
}
#endif

/*
* Get list of all RTE_RELATIONs in the given range table entry
* (i.e., rangeTableEntry could be a subquery where we're interested
@ -1480,6 +1480,23 @@ RegisterCitusConfigVariables(void)
GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
NULL, NULL, NULL);

DefineCustomBoolVariable(
"citus.enable_outer_joins_with_pseudoconstant_quals_pre_pg17",
gettext_noop("Enables running distributed queries with outer joins "
"and pseudoconstant quals pre PG17."),
gettext_noop("Set to false by default. If set to true, enables "
"running distributed queries with outer joins and "
"pseudoconstant quals, at user's own risk, because "
"pre PG17, Citus doesn't have access to "
"set_join_pathlist_hook, which doesn't guarantee correct "
"query results. Note that in PG17+, this GUC has no effect "
"and the user can run such queries"),
&EnableOuterJoinsWithPseudoconstantQualsPrePG17,
false,
PGC_USERSET,
GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
NULL, NULL, NULL);

DefineCustomBoolVariable(
"citus.enable_recurring_outer_join_pushdown",
gettext_noop("Enables outer join pushdown for recurring relations."),

@ -2493,8 +2510,8 @@ RegisterCitusConfigVariables(void)
NULL,
&SkipAdvisoryLockPermissionChecks,
false,
GUC_SUPERUSER_ONLY,
GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
PGC_SUSET,
GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
NULL, NULL, NULL);

DefineCustomBoolVariable(
@ -0,0 +1,5 @@
-- citus--13.2-1--14.0-1
-- bump version to 14.0-1

#include "udfs/citus_prepare_pg_upgrade/14.0-1.sql"
#include "udfs/citus_finish_pg_upgrade/14.0-1.sql"

@ -0,0 +1,5 @@
-- citus--14.0-1--13.2-1
-- downgrade version to 13.2-1

#include "../udfs/citus_prepare_pg_upgrade/13.0-1.sql"
#include "../udfs/citus_finish_pg_upgrade/13.2-1.sql"
@ -0,0 +1,268 @@
|
|||
CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade()
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql
|
||||
SET search_path = pg_catalog
|
||||
AS $cppu$
|
||||
DECLARE
|
||||
table_name regclass;
|
||||
command text;
|
||||
trigger_name text;
|
||||
BEGIN
|
||||
|
||||
|
||||
IF substring(current_Setting('server_version'), '\d+')::int >= 14 THEN
|
||||
EXECUTE $cmd$
|
||||
-- disable propagation to prevent EnsureCoordinator errors
|
||||
-- the aggregate created here does not depend on Citus extension (yet)
|
||||
-- since we add the dependency with the next command
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE AGGREGATE array_cat_agg(anycompatiblearray) (SFUNC = array_cat, STYPE = anycompatiblearray);
|
||||
COMMENT ON AGGREGATE array_cat_agg(anycompatiblearray)
|
||||
IS 'concatenate input arrays into a single array';
|
||||
RESET citus.enable_ddl_propagation;
|
||||
$cmd$;
|
||||
ELSE
|
||||
EXECUTE $cmd$
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray);
|
||||
COMMENT ON AGGREGATE array_cat_agg(anyarray)
|
||||
IS 'concatenate input arrays into a single array';
|
||||
RESET citus.enable_ddl_propagation;
|
||||
$cmd$;
|
||||
END IF;
|
||||
|
||||
--
|
||||
-- Citus creates the array_cat_agg but because of a compatibility
|
||||
-- issue between pg13-pg14, we drop and create it during upgrade.
|
||||
-- And as Citus creates it, there needs to be a dependency to the
|
||||
-- Citus extension, so we create that dependency here.
|
||||
-- We are not using:
|
||||
-- ALTER EXTENSION citus DROP/CREATE AGGREGATE array_cat_agg
|
||||
-- because we don't have an easy way to check if the aggregate
|
||||
-- exists with anyarray type or anycompatiblearray type.
|
||||
|
||||
INSERT INTO pg_depend
|
||||
SELECT
|
||||
'pg_proc'::regclass::oid as classid,
|
||||
(SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') as objid,
|
||||
0 as objsubid,
|
||||
'pg_extension'::regclass::oid as refclassid,
|
||||
(select oid from pg_extension where extname = 'citus') as refobjid,
|
||||
0 as refobjsubid ,
|
||||
'e' as deptype;
|
||||
|
||||
-- PG16 has its own any_value, so only create it pre PG16.
|
||||
-- We can remove this part when we drop support for PG16
|
||||
IF substring(current_Setting('server_version'), '\d+')::int < 16 THEN
|
||||
EXECUTE $cmd$
|
||||
-- disable propagation to prevent EnsureCoordinator errors
|
||||
-- the aggregate created here does not depend on Citus extension (yet)
|
||||
-- since we add the dependency with the next command
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE OR REPLACE FUNCTION pg_catalog.any_value_agg ( anyelement, anyelement )
|
||||
RETURNS anyelement AS $$
|
||||
SELECT CASE WHEN $1 IS NULL THEN $2 ELSE $1 END;
|
||||
$$ LANGUAGE SQL STABLE;
|
||||
|
||||
CREATE AGGREGATE pg_catalog.any_value (
|
||||
sfunc = pg_catalog.any_value_agg,
|
||||
combinefunc = pg_catalog.any_value_agg,
|
||||
basetype = anyelement,
|
||||
stype = anyelement
|
||||
);
|
||||
COMMENT ON AGGREGATE pg_catalog.any_value(anyelement) IS
|
||||
'Returns the value of any row in the group. It is mostly useful when you know there will be only 1 element.';
|
||||
RESET citus.enable_ddl_propagation;
|
||||
--
|
||||
-- Citus creates the any_value aggregate but because of a compatibility
|
||||
-- issue between pg15-pg16 -- any_value is created in PG16, we drop
|
||||
-- and create it during upgrade IF upgraded version is less than 16.
|
||||
-- And as Citus creates it, there needs to be a dependency to the
|
||||
-- Citus extension, so we create that dependency here.
|
||||
|
||||
INSERT INTO pg_depend
|
||||
SELECT
|
||||
'pg_proc'::regclass::oid as classid,
|
||||
(SELECT oid FROM pg_proc WHERE proname = 'any_value_agg') as objid,
|
||||
0 as objsubid,
|
||||
'pg_extension'::regclass::oid as refclassid,
|
||||
(select oid from pg_extension where extname = 'citus') as refobjid,
|
||||
0 as refobjsubid ,
|
||||
'e' as deptype;
|
||||
|
||||
INSERT INTO pg_depend
|
||||
SELECT
|
||||
'pg_proc'::regclass::oid as classid,
|
||||
(SELECT oid FROM pg_proc WHERE proname = 'any_value') as objid,
|
||||
0 as objsubid,
|
||||
'pg_extension'::regclass::oid as refclassid,
|
||||
(select oid from pg_extension where extname = 'citus') as refobjid,
|
||||
0 as refobjsubid ,
|
||||
'e' as deptype;
|
||||
$cmd$;
|
||||
END IF;
|
||||
|
||||
--
|
||||
-- restore citus catalog tables
|
||||
--
|
||||
INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
|
||||
|
||||
-- if we are upgrading from PG14/PG15 to PG16+,
|
||||
-- we need to regenerate the partkeys because they will include varnullingrels as well.
|
||||
UPDATE pg_catalog.pg_dist_partition
|
||||
SET partkey = column_name_to_column(pg_dist_partkeys_pre_16_upgrade.logicalrelid, col_name)
|
||||
FROM public.pg_dist_partkeys_pre_16_upgrade
|
||||
WHERE pg_dist_partkeys_pre_16_upgrade.logicalrelid = pg_dist_partition.logicalrelid;
|
||||
DROP TABLE public.pg_dist_partkeys_pre_16_upgrade;
|
||||
|
||||
-- if we are upgrading to PG18+,
|
||||
-- we need to regenerate the partkeys because they will include varreturningtype as well.
|
||||
UPDATE pg_catalog.pg_dist_partition
|
||||
SET partkey = column_name_to_column(pg_dist_partkeys_pre_18_upgrade.logicalrelid, col_name)
|
||||
FROM public.pg_dist_partkeys_pre_18_upgrade
|
||||
WHERE pg_dist_partkeys_pre_18_upgrade.logicalrelid = pg_dist_partition.logicalrelid;
|
||||
DROP TABLE public.pg_dist_partkeys_pre_18_upgrade;
|
||||
|
||||
INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
|
||||
INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
|
||||
INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;
|
||||
INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node;
|
||||
INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group;
|
||||
INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction;
|
||||
INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation;
|
||||
INSERT INTO pg_catalog.pg_dist_cleanup SELECT * FROM public.pg_dist_cleanup;
|
||||
INSERT INTO pg_catalog.pg_dist_schema SELECT schemaname::regnamespace, colocationid FROM public.pg_dist_schema;
|
||||
-- enterprise catalog tables
|
||||
INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo;
|
||||
INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo;
|
||||
|
||||
-- Temporarily disable trigger to check for validity of functions while
|
||||
-- inserting. The current contents of the table might be invalid if one of
|
||||
-- the functions was removed by the user without also removing the
|
||||
-- rebalance strategy. Obviously that's not great, but it should be no
|
||||
-- reason to fail the upgrade.
|
||||
ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger;
|
||||
INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT
|
||||
name,
|
||||
default_strategy,
|
||||
shard_cost_function::regprocedure::regproc,
|
||||
node_capacity_function::regprocedure::regproc,
|
||||
shard_allowed_on_node_function::regprocedure::regproc,
|
||||
default_threshold,
|
||||
minimum_threshold,
|
||||
improvement_threshold
|
||||
FROM public.pg_dist_rebalance_strategy;
|
||||
ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger;
|
||||
|
||||
--
|
||||
-- drop backup tables
|
||||
--
|
||||
DROP TABLE public.pg_dist_authinfo;
|
||||
DROP TABLE public.pg_dist_colocation;
|
||||
DROP TABLE public.pg_dist_local_group;
|
||||
DROP TABLE public.pg_dist_node;
|
||||
DROP TABLE public.pg_dist_node_metadata;
|
||||
DROP TABLE public.pg_dist_partition;
|
||||
DROP TABLE public.pg_dist_placement;
|
||||
DROP TABLE public.pg_dist_poolinfo;
|
||||
DROP TABLE public.pg_dist_shard;
|
||||
DROP TABLE public.pg_dist_transaction;
|
||||
DROP TABLE public.pg_dist_rebalance_strategy;
|
||||
DROP TABLE public.pg_dist_cleanup;
|
||||
DROP TABLE public.pg_dist_schema;
|
||||
--
|
||||
-- reset sequences
|
||||
--
|
||||
PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_operationid_seq', (SELECT MAX(operation_id)+1 AS max_operation_id FROM pg_dist_cleanup), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_cleanup_recordid_seq', (SELECT MAX(record_id)+1 AS max_record_id FROM pg_dist_cleanup), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_clock_logical_seq', (SELECT last_value FROM public.pg_dist_clock_logical_seq), false);
|
||||
DROP TABLE public.pg_dist_clock_logical_seq;
|
||||
|
||||
|
||||
|
||||
--
|
||||
-- register triggers
|
||||
--
|
||||
FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition JOIN pg_class ON (logicalrelid = oid) WHERE relkind <> 'f'
|
||||
LOOP
|
||||
trigger_name := 'truncate_trigger_' || table_name::oid;
|
||||
command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()';
|
||||
EXECUTE command;
|
||||
command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name);
|
||||
EXECUTE command;
|
||||
END LOOP;
|
||||
|
||||
--
|
||||
-- set dependencies
|
||||
--
|
||||
INSERT INTO pg_depend
|
||||
SELECT
|
||||
'pg_class'::regclass::oid as classid,
|
||||
p.logicalrelid::regclass::oid as objid,
|
||||
0 as objsubid,
|
||||
'pg_extension'::regclass::oid as refclassid,
|
||||
(select oid from pg_extension where extname = 'citus') as refobjid,
|
||||
0 as refobjsubid ,
|
||||
'n' as deptype
|
||||
FROM pg_catalog.pg_dist_partition p;
|
||||
|
||||
-- If citus_columnar extension exists, then perform the post PG-upgrade work for columnar as well.
|
||||
--
|
||||
-- First look if pg_catalog.columnar_finish_pg_upgrade function exists as part of the citus_columnar
|
||||
-- extension. (We check whether it's part of the extension just for security reasons). If it does, then
|
||||
-- call it. If not, then look for the columnar_internal.columnar_ensure_am_depends_catalog function as
|
||||
-- part of the citus_columnar extension. If so, then call it. We alternatively check for the latter UDF
|
||||
-- just because pg_catalog.columnar_finish_pg_upgrade function is introduced in citus_columnar 13.2-1
|
||||
-- and as of today all it does is to call columnar_internal.columnar_ensure_am_depends_catalog function.
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM pg_depend
|
||||
JOIN pg_proc ON (pg_depend.objid = pg_proc.oid)
|
||||
JOIN pg_namespace ON (pg_proc.pronamespace = pg_namespace.oid)
|
||||
JOIN pg_extension ON (pg_depend.refobjid = pg_extension.oid)
|
||||
WHERE
|
||||
-- Looking if pg_catalog.columnar_finish_pg_upgrade function exists and
|
||||
-- if there is a dependency record from it (proc class = 1255) ..
|
||||
pg_depend.classid = 1255 AND pg_namespace.nspname = 'pg_catalog' AND pg_proc.proname = 'columnar_finish_pg_upgrade' AND
|
||||
-- .. to citus_columnar extension (3079 = extension class), if it exists.
|
||||
pg_depend.refclassid = 3079 AND pg_extension.extname = 'citus_columnar'
|
||||
)
|
||||
THEN PERFORM pg_catalog.columnar_finish_pg_upgrade();
|
||||
ELSIF EXISTS (
|
||||
SELECT 1 FROM pg_depend
|
||||
JOIN pg_proc ON (pg_depend.objid = pg_proc.oid)
|
||||
JOIN pg_namespace ON (pg_proc.pronamespace = pg_namespace.oid)
|
||||
JOIN pg_extension ON (pg_depend.refobjid = pg_extension.oid)
|
||||
WHERE
|
||||
-- Looking if columnar_internal.columnar_ensure_am_depends_catalog function exists and
|
||||
-- if there is a dependency record from it (proc class = 1255) ..
|
||||
pg_depend.classid = 1255 AND pg_namespace.nspname = 'columnar_internal' AND pg_proc.proname = 'columnar_ensure_am_depends_catalog' AND
|
||||
-- .. to citus_columnar extension (3079 = extension class), if it exists.
|
||||
pg_depend.refclassid = 3079 AND pg_extension.extname = 'citus_columnar'
|
||||
)
|
||||
THEN PERFORM columnar_internal.columnar_ensure_am_depends_catalog();
|
||||
END IF;
|
||||
|
||||
-- restore pg_dist_object from the stable identifiers
|
||||
TRUNCATE pg_catalog.pg_dist_object;
|
||||
INSERT INTO pg_catalog.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid)
|
||||
SELECT
|
||||
address.classid,
|
||||
address.objid,
|
||||
address.objsubid,
|
||||
naming.distribution_argument_index,
|
||||
naming.colocationid
|
||||
FROM
|
||||
public.pg_dist_object naming,
|
||||
pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;
|
||||
|
||||
DROP TABLE public.pg_dist_object;
|
||||
END;
|
||||
$cppu$;
|
||||
|
||||
COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade()
|
||||
IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade';
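For context, a minimal sketch of how this UDF pairs with citus_prepare_pg_upgrade() around a major PostgreSQL upgrade; the pg_upgrade step itself runs outside SQL and is only indicated in comments:

-- on the old cluster, before shutting it down
SELECT citus_prepare_pg_upgrade();
-- ... run pg_upgrade against the old and new data directories ...
-- on the new cluster, after starting it with the new binaries
SELECT citus_finish_pg_upgrade();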
|
||||
|
|
@ -115,6 +115,14 @@ BEGIN
|
|||
WHERE pg_dist_partkeys_pre_16_upgrade.logicalrelid = pg_dist_partition.logicalrelid;
|
||||
DROP TABLE public.pg_dist_partkeys_pre_16_upgrade;
|
||||
|
||||
-- if we are upgrading to PG18+,
|
||||
-- we need to regenerate the partkeys because they will include varreturningtype as well.
|
||||
UPDATE pg_catalog.pg_dist_partition
|
||||
SET partkey = column_name_to_column(pg_dist_partkeys_pre_18_upgrade.logicalrelid, col_name)
|
||||
FROM public.pg_dist_partkeys_pre_18_upgrade
|
||||
WHERE pg_dist_partkeys_pre_18_upgrade.logicalrelid = pg_dist_partition.logicalrelid;
|
||||
DROP TABLE public.pg_dist_partkeys_pre_18_upgrade;
|
||||
|
||||
INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
|
||||
INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
|
||||
INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;
|
||||
|
|
|
|||
|
|
@ -0,0 +1,111 @@
|
|||
CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql
|
||||
SET search_path = pg_catalog
|
||||
AS $cppu$
|
||||
BEGIN
|
||||
|
||||
DELETE FROM pg_depend WHERE
|
||||
objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
|
||||
refobjid IN (select oid from pg_extension where extname = 'citus');
|
||||
--
|
||||
-- We are dropping the aggregates because postgres 14 changed
|
||||
-- array_cat type from anyarray to anycompatiblearray. When
|
||||
-- upgrading to pg14, specifically when running pg_restore on
|
||||
-- array_cat_agg we would get an error. So we drop the aggregate
|
||||
-- and create the right one on citus_finish_pg_upgrade.
|
||||
|
||||
DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
|
||||
DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
|
||||
|
||||
-- We should drop any_value because PG16+ has its own any_value function
|
||||
-- We can remove this part when we drop support for PG16
|
||||
IF substring(current_Setting('server_version'), '\d+')::int < 16 THEN
|
||||
DELETE FROM pg_depend WHERE
|
||||
objid IN (SELECT oid FROM pg_proc WHERE proname = 'any_value' OR proname = 'any_value_agg') AND
|
||||
refobjid IN (select oid from pg_extension where extname = 'citus');
|
||||
DROP AGGREGATE IF EXISTS pg_catalog.any_value(anyelement);
|
||||
DROP FUNCTION IF EXISTS pg_catalog.any_value_agg(anyelement, anyelement);
|
||||
END IF;
|
||||
|
||||
--
|
||||
-- Drop existing backup tables
|
||||
--
|
||||
DROP TABLE IF EXISTS public.pg_dist_partition;
|
||||
DROP TABLE IF EXISTS public.pg_dist_shard;
|
||||
DROP TABLE IF EXISTS public.pg_dist_placement;
|
||||
DROP TABLE IF EXISTS public.pg_dist_node_metadata;
|
||||
DROP TABLE IF EXISTS public.pg_dist_node;
|
||||
DROP TABLE IF EXISTS public.pg_dist_local_group;
|
||||
DROP TABLE IF EXISTS public.pg_dist_transaction;
|
||||
DROP TABLE IF EXISTS public.pg_dist_colocation;
|
||||
DROP TABLE IF EXISTS public.pg_dist_authinfo;
|
||||
DROP TABLE IF EXISTS public.pg_dist_poolinfo;
|
||||
DROP TABLE IF EXISTS public.pg_dist_rebalance_strategy;
|
||||
DROP TABLE IF EXISTS public.pg_dist_object;
|
||||
DROP TABLE IF EXISTS public.pg_dist_cleanup;
|
||||
DROP TABLE IF EXISTS public.pg_dist_schema;
|
||||
DROP TABLE IF EXISTS public.pg_dist_clock_logical_seq;
|
||||
|
||||
--
|
||||
-- backup citus catalog tables
|
||||
--
|
||||
CREATE TABLE public.pg_dist_partition AS SELECT * FROM pg_catalog.pg_dist_partition;
|
||||
CREATE TABLE public.pg_dist_shard AS SELECT * FROM pg_catalog.pg_dist_shard;
|
||||
CREATE TABLE public.pg_dist_placement AS SELECT * FROM pg_catalog.pg_dist_placement;
|
||||
CREATE TABLE public.pg_dist_node_metadata AS SELECT * FROM pg_catalog.pg_dist_node_metadata;
|
||||
CREATE TABLE public.pg_dist_node AS SELECT * FROM pg_catalog.pg_dist_node;
|
||||
CREATE TABLE public.pg_dist_local_group AS SELECT * FROM pg_catalog.pg_dist_local_group;
|
||||
CREATE TABLE public.pg_dist_transaction AS SELECT * FROM pg_catalog.pg_dist_transaction;
|
||||
CREATE TABLE public.pg_dist_colocation AS SELECT * FROM pg_catalog.pg_dist_colocation;
|
||||
CREATE TABLE public.pg_dist_cleanup AS SELECT * FROM pg_catalog.pg_dist_cleanup;
|
||||
-- save names of the tenant schemas instead of their oids because the oids might change after pg upgrade
|
||||
CREATE TABLE public.pg_dist_schema AS SELECT schemaid::regnamespace::text AS schemaname, colocationid FROM pg_catalog.pg_dist_schema;
|
||||
-- enterprise catalog tables
|
||||
CREATE TABLE public.pg_dist_authinfo AS SELECT * FROM pg_catalog.pg_dist_authinfo;
|
||||
CREATE TABLE public.pg_dist_poolinfo AS SELECT * FROM pg_catalog.pg_dist_poolinfo;
|
||||
-- sequences
|
||||
CREATE TABLE public.pg_dist_clock_logical_seq AS SELECT last_value FROM pg_catalog.pg_dist_clock_logical_seq;
|
||||
CREATE TABLE public.pg_dist_rebalance_strategy AS SELECT
|
||||
name,
|
||||
default_strategy,
|
||||
shard_cost_function::regprocedure::text,
|
||||
node_capacity_function::regprocedure::text,
|
||||
shard_allowed_on_node_function::regprocedure::text,
|
||||
default_threshold,
|
||||
minimum_threshold,
|
||||
improvement_threshold
|
||||
FROM pg_catalog.pg_dist_rebalance_strategy;
|
||||
|
||||
-- store upgrade stable identifiers on pg_dist_object catalog
|
||||
CREATE TABLE public.pg_dist_object AS SELECT
|
||||
address.type,
|
||||
address.object_names,
|
||||
address.object_args,
|
||||
objects.distribution_argument_index,
|
||||
objects.colocationid
|
||||
FROM pg_catalog.pg_dist_object objects,
|
||||
pg_catalog.pg_identify_object_as_address(objects.classid, objects.objid, objects.objsubid) address;
|
||||
|
||||
-- if we are upgrading from PG14/PG15 to PG16+,
|
||||
-- we will need to regenerate the partkeys because they will include varnullingrels as well.
|
||||
-- so we save the partkeys as column names here
|
||||
CREATE TABLE IF NOT EXISTS public.pg_dist_partkeys_pre_16_upgrade AS
|
||||
SELECT logicalrelid, column_to_column_name(logicalrelid, partkey) as col_name
|
||||
FROM pg_catalog.pg_dist_partition WHERE partkey IS NOT NULL AND partkey NOT ILIKE '%varnullingrels%';
|
||||
|
||||
-- similarly, if we are upgrading to PG18+,
|
||||
-- we will need to regenerate the partkeys because they will include varreturningtype as well.
|
||||
-- so we save the partkeys as column names here
|
||||
CREATE TABLE IF NOT EXISTS public.pg_dist_partkeys_pre_18_upgrade AS
|
||||
SELECT logicalrelid, column_to_column_name(logicalrelid, partkey) as col_name
|
||||
FROM pg_catalog.pg_dist_partition WHERE partkey IS NOT NULL AND partkey NOT ILIKE '%varreturningtype%';
|
||||
-- remove duplicates (we would only have duplicates if we are upgrading from pre-16 to PG18+)
|
||||
DELETE FROM public.pg_dist_partkeys_pre_18_upgrade USING public.pg_dist_partkeys_pre_16_upgrade p16
|
||||
WHERE public.pg_dist_partkeys_pre_18_upgrade.logicalrelid = p16.logicalrelid
|
||||
AND public.pg_dist_partkeys_pre_18_upgrade.col_name = p16.col_name;
|
||||
END;
|
||||
$cppu$;
|
||||
|
||||
COMMENT ON FUNCTION pg_catalog.citus_prepare_pg_upgrade()
|
||||
IS 'perform tasks to copy citus settings to a location that could later be restored after pg_upgrade is done';
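A quick, non-authoritative way to verify that the backup step ran is to look for the copies the function creates in the public schema; the table names are taken from the script above:

SELECT relname
FROM pg_class
WHERE relnamespace = 'public'::regnamespace
  AND relname IN ('pg_dist_partition', 'pg_dist_shard', 'pg_dist_placement')
ORDER BY relname;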
|
||||
|
|
@ -93,6 +93,17 @@ BEGIN
|
|||
CREATE TABLE IF NOT EXISTS public.pg_dist_partkeys_pre_16_upgrade AS
|
||||
SELECT logicalrelid, column_to_column_name(logicalrelid, partkey) as col_name
|
||||
FROM pg_catalog.pg_dist_partition WHERE partkey IS NOT NULL AND partkey NOT ILIKE '%varnullingrels%';
|
||||
|
||||
-- similarly, if we are upgrading to PG18+,
|
||||
-- we will need to regenerate the partkeys because they will include varreturningtype as well.
|
||||
-- so we save the partkeys as column names here
|
||||
CREATE TABLE IF NOT EXISTS public.pg_dist_partkeys_pre_18_upgrade AS
|
||||
SELECT logicalrelid, column_to_column_name(logicalrelid, partkey) as col_name
|
||||
FROM pg_catalog.pg_dist_partition WHERE partkey IS NOT NULL AND partkey NOT ILIKE '%varreturningtype%';
|
||||
-- remove duplicates (we would only have duplicates if we are upgrading from pre-16 to PG18+)
|
||||
DELETE FROM public.pg_dist_partkeys_pre_18_upgrade USING public.pg_dist_partkeys_pre_16_upgrade p16
|
||||
WHERE public.pg_dist_partkeys_pre_18_upgrade.logicalrelid = p16.logicalrelid
|
||||
AND public.pg_dist_partkeys_pre_18_upgrade.col_name = p16.col_name;
|
||||
END;
|
||||
$cppu$;
|
||||
|
||||
|
|
|
|||
|
|
@ -49,6 +49,9 @@ ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
|
|||
#define STAT_TENANTS_COLUMNS 9
|
||||
#define ONE_QUERY_SCORE 1000000000
|
||||
|
||||
/* this doesn't attempt dereferencing given input and is computed in compile-time, so it's safe */
|
||||
#define TENANT_STATS_SCORE_FIELD_BIT_LENGTH (sizeof(((TenantStats *) NULL)->score) * 8)
|
||||
|
||||
static char AttributeToTenant[MAX_TENANT_ATTRIBUTE_LENGTH] = "";
|
||||
static CmdType AttributeToCommandType = CMD_UNKNOWN;
|
||||
static int AttributeToColocationGroupId = INVALID_COLOCATION_ID;
|
||||
|
|
@ -605,8 +608,13 @@ ReduceScoreIfNecessary(TenantStats *tenantStats, TimestampTz queryTime)
|
|||
*/
|
||||
if (periodCountAfterLastScoreReduction > 0)
|
||||
{
|
||||
tenantStats->score >>= periodCountAfterLastScoreReduction;
|
||||
tenantStats->lastScoreReduction = queryTime;
|
||||
|
||||
/* additional check to avoid undefined behavior */
|
||||
tenantStats->score = (periodCountAfterLastScoreReduction <
|
||||
TENANT_STATS_SCORE_FIELD_BIT_LENGTH)
|
||||
? tenantStats->score >> periodCountAfterLastScoreReduction
|
||||
: 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -137,6 +137,8 @@ CopyNodeDistributedPlan(COPYFUNC_ARGS)
|
|||
COPY_SCALAR_FIELD(fastPathRouterPlan);
|
||||
COPY_SCALAR_FIELD(numberOfTimesExecuted);
|
||||
COPY_NODE_FIELD(planningError);
|
||||
|
||||
COPY_SCALAR_FIELD(sourceResultRepartitionColumnIndex);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -370,6 +370,20 @@ DistOpsValidityState(Node *node, const DistributeObjectOps *ops)
|
|||
{
|
||||
if (ops && ops->operationType == DIST_OPS_CREATE)
|
||||
{
|
||||
/*
|
||||
* We should beware of qualifying the CREATE statement too early.
|
||||
*/
|
||||
if (nodeTag(node) == T_CreateDomainStmt)
|
||||
{
|
||||
/*
|
||||
* Create Domain statements should be qualified after local creation
|
||||
* because in case of an error in creation, we don't want to print
|
||||
* the error with the qualified name, as that would differ with
|
||||
* vanilla Postgres error output.
|
||||
*/
|
||||
return ShouldQualifyAfterLocalCreation;
|
||||
}
|
||||
|
||||
/*
|
||||
* We should not validate CREATE statements because no address exists
|
||||
* here yet.
|
||||
|
|
|
|||
|
|
@ -203,6 +203,7 @@ OutDistributedPlan(OUTFUNC_ARGS)
|
|||
WRITE_UINT_FIELD(numberOfTimesExecuted);
|
||||
|
||||
WRITE_NODE_FIELD(planningError);
|
||||
WRITE_INT_FIELD(sourceResultRepartitionColumnIndex);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -73,34 +73,8 @@ PG_FUNCTION_INFO_V1(update_distributed_table_colocation);
|
|||
Datum
|
||||
mark_tables_colocated(PG_FUNCTION_ARGS)
|
||||
{
|
||||
CheckCitusVersion(ERROR);
|
||||
EnsureCoordinator();
|
||||
|
||||
Oid sourceRelationId = PG_GETARG_OID(0);
|
||||
ArrayType *relationIdArrayObject = PG_GETARG_ARRAYTYPE_P(1);
|
||||
|
||||
int relationCount = ArrayObjectCount(relationIdArrayObject);
|
||||
if (relationCount < 1)
|
||||
{
|
||||
ereport(ERROR, (errmsg("at least one target table is required for this "
|
||||
"operation")));
|
||||
}
|
||||
|
||||
EnsureTableOwner(sourceRelationId);
|
||||
|
||||
Datum *relationIdDatumArray = DeconstructArrayObject(relationIdArrayObject);
|
||||
|
||||
for (int relationIndex = 0; relationIndex < relationCount; relationIndex++)
|
||||
{
|
||||
Oid nextRelationOid = DatumGetObjectId(relationIdDatumArray[relationIndex]);
|
||||
|
||||
/* we require that the user either owns all tables or is superuser */
|
||||
EnsureTableOwner(nextRelationOid);
|
||||
|
||||
MarkTablesColocated(sourceRelationId, nextRelationOid);
|
||||
}
|
||||
|
||||
PG_RETURN_VOID();
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("this function is deprecated and no longer is used")));
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -1306,7 +1280,7 @@ ColocatedShardIdInRelation(Oid relationId, int shardIndex)
|
|||
/*
|
||||
* DeleteColocationGroupIfNoTablesBelong function deletes given co-location group if there
|
||||
* is no relation in that co-location group. A co-location group may become empty after
|
||||
* mark_tables_colocated or upgrade_reference_table UDF calls. In that case we need to
|
||||
* update_distributed_table_colocation UDF calls. In that case we need to
|
||||
* remove empty co-location group to prevent orphaned co-location groups.
|
||||
*/
|
||||
void
|
||||
|
|
|
|||
|
|
@ -1040,12 +1040,30 @@ MaintenanceDaemonShmemExit(int code, Datum arg)
|
|||
if (myDbData != NULL)
|
||||
{
|
||||
/*
|
||||
* Confirm that I am still the registered maintenance daemon before exiting.
|
||||
* Once the maintenance daemon fails (e.g., due to an error in the main loop),
|
||||
* both Postgres tries to restart the failed daemon and Citus attempts to start
|
||||
* a new one. In that case, the one started by Citus ends up here.
|
||||
*
|
||||
* As the maintenance daemon that Citus tried to start, we might see the entry
|
||||
* for the daemon restarted by Postgres if the system was so slow that it
|
||||
* took a long time for us to be re-scheduled to call MaintenanceDaemonShmemExit(),
|
||||
* e.g., under valgrind testing.
|
||||
*
|
||||
* In that case, we should unregister ourself only if we are still the registered
|
||||
* maintenance daemon.
|
||||
*/
|
||||
Assert(myDbData->workerPid == MyProcPid);
|
||||
|
||||
myDbData->daemonStarted = false;
|
||||
myDbData->workerPid = 0;
|
||||
if (myDbData->workerPid == MyProcPid)
|
||||
{
|
||||
myDbData->daemonStarted = false;
|
||||
myDbData->workerPid = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
ereport(LOG, (errmsg(
|
||||
"maintenance daemon for database %u has already been replaced by "
|
||||
"Postgres, skipping to unregister this maintenance daemon",
|
||||
databaseOid)));
|
||||
}
|
||||
}
|
||||
|
||||
LWLockRelease(&MaintenanceDaemonControl->lock);
|
||||
|
|
|
|||
|
|
@ -64,6 +64,14 @@
|
|||
/*global variables for citus_columnar fake version Y */
|
||||
#define CITUS_COLUMNAR_INTERNAL_VERSION "11.1-0"
|
||||
|
||||
/*
|
||||
* We can't rely on RelidByRelfilenumber for temp tables since PG18 (it was backpatched
|
||||
* through PG13), so we use this macro to resolve the relid from the relation itself for
|
||||
* temp relations. Otherwise RelidByRelfilenumber should be used.
|
||||
*/
|
||||
#define RelationPrecomputeOid(a) (RelationUsesLocalBuffers(a) ? RelationGetRelid(a) : \
|
||||
InvalidOid)
|
||||
|
||||
/*
|
||||
* ColumnarOptions holds the option values to be used when reading or writing
|
||||
* a columnar table. To resolve these values, we first check foreign table's options,
|
||||
|
|
@ -232,7 +240,7 @@ extern void columnar_init_gucs(void);
|
|||
extern CompressionType ParseCompressionType(const char *compressionTypeString);
|
||||
|
||||
/* Function declarations for writing to a columnar table */
|
||||
extern ColumnarWriteState * ColumnarBeginWrite(RelFileLocator relfilelocator,
|
||||
extern ColumnarWriteState * ColumnarBeginWrite(Relation rel,
|
||||
ColumnarOptions options,
|
||||
TupleDesc tupleDescriptor);
|
||||
extern uint64 ColumnarWriteRow(ColumnarWriteState *state, Datum *columnValues,
|
||||
|
|
@ -287,21 +295,21 @@ extern PGDLLEXPORT bool ReadColumnarOptions(Oid regclass, ColumnarOptions *optio
|
|||
extern PGDLLEXPORT bool IsColumnarTableAmTable(Oid relationId);
|
||||
|
||||
/* columnar_metadata_tables.c */
|
||||
extern void DeleteMetadataRows(RelFileLocator relfilelocator);
|
||||
extern void DeleteMetadataRows(Relation rel);
|
||||
extern uint64 ColumnarMetadataNewStorageId(void);
|
||||
extern uint64 GetHighestUsedAddress(RelFileLocator relfilelocator);
|
||||
extern uint64 GetHighestUsedAddress(Relation rel);
|
||||
extern EmptyStripeReservation * ReserveEmptyStripe(Relation rel, uint64 columnCount,
|
||||
uint64 chunkGroupRowCount,
|
||||
uint64 stripeRowCount);
|
||||
extern StripeMetadata * CompleteStripeReservation(Relation rel, uint64 stripeId,
|
||||
uint64 sizeBytes, uint64 rowCount,
|
||||
uint64 chunkCount);
|
||||
extern void SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
|
||||
extern void SaveStripeSkipList(Oid relid, RelFileLocator relfilelocator, uint64 stripe,
|
||||
StripeSkipList *stripeSkipList,
|
||||
TupleDesc tupleDescriptor);
|
||||
extern void SaveChunkGroups(RelFileLocator relfilelocator, uint64 stripe,
|
||||
extern void SaveChunkGroups(Oid relid, RelFileLocator relfilelocator, uint64 stripe,
|
||||
List *chunkGroupRowCounts);
|
||||
extern StripeSkipList * ReadStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
|
||||
extern StripeSkipList * ReadStripeSkipList(Relation rel, uint64 stripe,
|
||||
TupleDesc tupleDescriptor,
|
||||
uint32 chunkCount,
|
||||
Snapshot snapshot);
|
||||
|
|
@ -317,6 +325,7 @@ extern uint64 StripeGetHighestRowNumber(StripeMetadata *stripeMetadata);
|
|||
extern StripeMetadata * FindStripeWithHighestRowNumber(Relation relation,
|
||||
Snapshot snapshot);
|
||||
extern Datum columnar_relation_storageid(PG_FUNCTION_ARGS);
|
||||
extern Oid ColumnarRelationId(Oid relid, RelFileLocator relfilelocator);
|
||||
|
||||
|
||||
/* write_state_management.c */
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ typedef struct EmptyStripeReservation
|
|||
uint64 stripeFirstRowNumber;
|
||||
} EmptyStripeReservation;
|
||||
|
||||
extern List * StripesForRelfilelocator(RelFileLocator relfilelocator);
|
||||
extern List * StripesForRelfilelocator(Relation rel);
|
||||
extern void ColumnarStorageUpdateIfNeeded(Relation rel, bool isUpgrade);
|
||||
extern List * ExtractColumnarRelOptions(List *inOptions, List **outColumnarOptions);
|
||||
extern void SetColumnarRelOptions(RangeVar *rv, List *reloptions);
|
||||
|
|
|
|||
|
|
@ -25,12 +25,4 @@
|
|||
#define ExplainPropertyLong(qlabel, value, es) \
|
||||
ExplainPropertyInteger(qlabel, NULL, value, es)
|
||||
|
||||
|
||||
/* tuple-descriptor attributes moved in PostgreSQL 18: */
|
||||
#if PG_VERSION_NUM >= PG_VERSION_18
|
||||
#define Attr(tupdesc, colno) TupleDescAttr((tupdesc), (colno))
|
||||
#else
|
||||
#define Attr(tupdesc, colno) (&((tupdesc)->attrs[(colno)]))
|
||||
#endif
|
||||
|
||||
#endif /* COLUMNAR_COMPAT_H */
|
||||
|
|
|
|||
|
|
@ -25,7 +25,8 @@ typedef enum DistOpsValidationState
|
|||
HasAtLeastOneValidObject,
|
||||
HasNoneValidObject,
|
||||
HasObjectWithInvalidOwnership,
|
||||
NoAddressResolutionRequired
|
||||
NoAddressResolutionRequired,
|
||||
ShouldQualifyAfterLocalCreation
|
||||
} DistOpsValidationState;
|
||||
|
||||
extern void SetLocalClientMinMessagesIfRunningPGTests(int
|
||||
|
|
|
|||
|
|
@ -112,5 +112,6 @@ extern void UndistributeDisconnectedCitusLocalTables(void);
|
|||
extern void NotifyUtilityHookConstraintDropped(void);
|
||||
extern void ResetConstraintDropped(void);
|
||||
extern void ExecuteDistributedDDLJob(DDLJob *ddlJob);
|
||||
extern bool IsDroppedOrGenerated(Form_pg_attribute attr);
|
||||
|
||||
#endif /* MULTI_UTILITY_H */
|
||||
|
|
|
|||
|
|
@ -119,6 +119,7 @@ typedef struct FastPathRestrictionContext
|
|||
bool delayFastPathPlanning;
|
||||
} FastPathRestrictionContext;
|
||||
|
||||
struct DistributedPlanningContext;
|
||||
typedef struct PlannerRestrictionContext
|
||||
{
|
||||
RelationRestrictionContext *relationRestrictionContext;
|
||||
|
|
@ -132,6 +133,18 @@ typedef struct PlannerRestrictionContext
|
|||
*/
|
||||
FastPathRestrictionContext *fastPathRestrictionContext;
|
||||
MemoryContext memoryContext;
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_18
|
||||
|
||||
/*
|
||||
* Enable access to the distributed planning context from
|
||||
* planner hooks called by Postgres. Enables Citus to track
|
||||
* changes made by Postgres to the query tree (such as
|
||||
* expansion of virtual columns) and ensure they are reflected
|
||||
* back to subsequent distributed planning.
|
||||
*/
|
||||
struct DistributedPlanningContext *planContext;
|
||||
#endif
|
||||
} PlannerRestrictionContext;
|
||||
|
||||
typedef struct RelationShard
|
||||
|
|
|
|||
|
|
@ -586,7 +586,8 @@ extern DistributedPlan * CreatePhysicalDistributedPlan(MultiTreeRoot *multiTree,
|
|||
plannerRestrictionContext);
|
||||
extern Task * CreateBasicTask(uint64 jobId, uint32 taskId, TaskType taskType,
|
||||
char *queryString);
|
||||
|
||||
extern OpExpr * MakeOpExpressionExtended(Var *leftVar, Expr *rightArg,
|
||||
int16 strategyNumber);
|
||||
extern OpExpr * MakeOpExpression(Var *variable, int16 strategyNumber);
|
||||
extern Node * WrapUngroupedVarsInAnyValueAggregate(Node *expression,
|
||||
List *groupClauseList,
|
||||
|
|
|
|||
|
|
@ -49,6 +49,6 @@ extern DeferredErrorMessage * DeferErrorIfCannotPushdownSubquery(Query *subquery
|
|||
extern DeferredErrorMessage * DeferErrorIfUnsupportedUnionQuery(Query *queryTree);
|
||||
extern bool IsJsonTableRTE(RangeTblEntry *rte);
|
||||
extern bool IsOuterJoinExpr(Node *node);
|
||||
|
||||
extern void FlattenGroupExprs(Query *query);
|
||||
|
||||
#endif /* QUERY_PUSHDOWN_PLANNING_H */
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@
|
|||
#include "distributed/relation_restriction_equivalence.h"
|
||||
|
||||
extern bool EnableRecurringOuterJoinPushdown;
|
||||
extern bool EnableOuterJoinsWithPseudoconstantQualsPrePG17;
|
||||
typedef struct RecursivePlanningContextInternal RecursivePlanningContext;
|
||||
|
||||
typedef struct RangeTblEntryIndex
|
||||
|
|
|
|||
|
|
@ -320,6 +320,21 @@ check-citus-upgrade-mixed-local: all clean-upgrade-artifacts
|
|||
--citus-old-version=$(citus-old-version) \
|
||||
--mixed
|
||||
|
||||
check-citus-minor-upgrade: all
|
||||
$(citus_upgrade_check) \
|
||||
--bindir=$(bindir) \
|
||||
--pgxsdir=$(pgxsdir) \
|
||||
--citus-pre-tar=$(citus-pre-tar) \
|
||||
--citus-post-tar=$(citus-post-tar) \
|
||||
--minor-upgrade
|
||||
|
||||
check-citus-minor-upgrade-local: all clean-upgrade-artifacts
|
||||
$(citus_upgrade_check) \
|
||||
--bindir=$(bindir) \
|
||||
--pgxsdir=$(pgxsdir) \
|
||||
--citus-old-version=$(citus-old-version) \
|
||||
--minor-upgrade
|
||||
|
||||
clean-upgrade-artifacts:
|
||||
rm -rf $(citus_abs_srcdir)/tmp_citus_upgrade/ /tmp/citus_copy/
|
||||
|
||||
|
|
|
|||
|
|
@ -297,7 +297,13 @@ s/(NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA p
|
|||
|
||||
s/, password_required=false//g
|
||||
s/provide the file or change sslmode/provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode/g
|
||||
s/(:varcollid [0-9]+) :varlevelsup 0/\1 :varnullingrels (b) :varlevelsup 0/g
|
||||
|
||||
#pg18 varreturningtype - change needed for PG16, PG17 tests
|
||||
s/(:varnullingrels \(b\) :varlevelsup 0) (:varnosyn 1)/\1 :varreturningtype 0 \2/g
|
||||
|
||||
#pg16 varnullingrels and pg18 varreturningtype - change needed for PG15 tests
|
||||
s/(:varcollid [0-9]+) :varlevelsup 0/\1 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0/g
|
||||
|
||||
s/table_name_for_view\.([_a-z0-9]+)(,| |$)/\1\2/g
|
||||
s/permission denied to terminate process/must be a superuser to terminate superuser process/g
|
||||
s/permission denied to cancel query/must be a superuser to cancel superuser query/g
|
||||
|
|
@ -326,16 +332,38 @@ s/\| CHECK ([a-zA-Z])(.*)/| CHECK \(\1\2\)/g
|
|||
|
||||
/DEBUG: drop auto-cascades to type [a-zA-Z_]*.pg_temp_[0-9]*/d
|
||||
|
||||
# pg18 change: strip trailing “.00” (or “.0…”) from actual rows counts
|
||||
s/(actual rows=[0-9]+)\.[0-9]+/\1/g
|
||||
# --- PG18 Actual Rows normalization ---
|
||||
# New in PG18: Actual Rows in EXPLAIN output can be fractional, so normalize as follows:
|
||||
# 1) 0.50 (and 0.5, 0.5000...) -> 0
|
||||
s/(actual[[:space:]]*rows[[:space:]]*[=:][[:space:]]*)0\.50*/\10/gI
|
||||
s/(actual[^)]*rows[[:space:]]*=[[:space:]]*)0\.50*/\10/gI
|
||||
|
||||
# 2) 0.51+ -> 1
|
||||
s/(actual[[:space:]]*rows[[:space:]]*[=:][[:space:]]*)0\.(5[1-9][0-9]*|[6-9][0-9]*)/\11/gI
|
||||
s/(actual[^)]*rows[[:space:]]*=[[:space:]]*)0\.(5[1-9][0-9]*|[6-9][0-9]*)/\11/gI
|
||||
|
||||
# 3) Strip trivial trailing ".0..." (6.00 -> 6) across the cross-format rules below
|
||||
s/(actual[[:space:]]*rows[[:space:]]*[=:][[:space:]]*)([0-9]+)\.0+/\1\2/gI
|
||||
s/(actual[^)]*rows[[:space:]]*=[[:space:]]*)([0-9]+)\.0+/\1\2/gI
|
||||
|
||||
# 4) YAML/XML/JSON: strip trailing ".0..."
|
||||
s/(Actual[[:space:]]+Rows:[[:space:]]*[0-9]+)\.0+/\1/gI
|
||||
s/(<Actual-Rows>[0-9]+)\.0+(<\/Actual-Rows>)/\1\2/g
|
||||
s/("Actual[[:space:]]+Rows":[[:space:]]*[0-9]+)\.0+/\1/gI
|
||||
|
||||
# 5) Placeholder cleanups (kept from existing rules; harmless if unused)
|
||||
# JSON placeholder cleanup: '"Actual Rows": N.N' -> N
|
||||
s/("Actual[[:space:]]+Rows":[[:space:]]*)N\.N/\1N/gI
|
||||
# Text EXPLAIN collapse: "rows=N.N" -> "rows=N"
|
||||
s/(rows[[:space:]]*=[[:space:]]*)N\.N/\1N/gI
|
||||
# YAML placeholder: "Actual Rows: N.N" -> "Actual Rows: N"
|
||||
s/(Actual[[:space:]]+Rows:[[:space:]]*)N\.N/\1N/gI
|
||||
# --- end PG18 Actual Rows normalization ---
|
||||
|
||||
# pg18 “Disabled” change start
|
||||
# ignore any “Disabled:” lines in test output
|
||||
/^\s*Disabled:/d
|
||||
|
||||
# ignore any JSON-style Disabled field
|
||||
/^\s*"Disabled":/d
|
||||
|
||||
# ignore XML <Disabled>true</Disabled> or <Disabled>false</Disabled>
|
||||
/^\s*<Disabled>.*<\/Disabled>/d
|
||||
# pg18 “Disabled” change end
|
||||
|
|
@ -344,3 +372,34 @@ s/(actual rows=[0-9]+)\.[0-9]+/\1/g
|
|||
s/^([ \t]*)List of tables$/\1List of relations/g
|
||||
s/^([ \t]*)List of indexes$/\1List of relations/g
|
||||
s/^([ \t]*)List of sequences$/\1List of relations/g
|
||||
|
||||
# --- PG18 FK wording -> legacy generic form ---
|
||||
# e.g., "violates RESTRICT setting of foreign key constraint" -> "violates foreign key constraint"
|
||||
s/violates RESTRICT setting of foreign key constraint/violates foreign key constraint/g
|
||||
# DETAIL line changed "is referenced" -> old "is still referenced"
|
||||
s/\<is referenced from table\>/is still referenced from table/g
|
||||
|
||||
# pg18 extension_control_path GUC debugs
|
||||
# ignore any "find_in_path:" lines in test output
|
||||
/DEBUG: find_in_path: trying .*/d
|
||||
|
||||
# EXPLAIN (PG18+): hide Materialize storage instrumentation
|
||||
# this rule can be removed when PG18 is the minimum supported version
|
||||
/^[ \t]*Storage:[ \t].*$/d
|
||||
|
||||
# PG18: drop 'subscription "<name>"' prefix
|
||||
# this rule can be removed when PG18 is the minimum supported version
|
||||
s/^[[:space:]]*ERROR:[[:space:]]+subscription "[^"]+" could not connect to the publisher:[[:space:]]*/ERROR: could not connect to the publisher: /I
|
||||
# PG18: drop verbose 'connection to server … failed:' preamble
|
||||
s/^[[:space:]]*ERROR:[[:space:]]+could not connect to the publisher:[[:space:]]*connection to server .* failed:[[:space:]]*/ERROR: could not connect to the publisher: /I
|
||||
|
||||
# PG18: replace named window refs like "OVER w1" with neutral "OVER (?)"
|
||||
# this rule can be removed when PG18 is the minimum supported version
|
||||
# only on Sort Key / Group Key / Output lines
|
||||
# Sort Key
|
||||
/^[[:space:]]*Sort Key:/ s/(OVER[[:space:]]+)w[0-9]+/\1(?)/g
|
||||
# Group Key
|
||||
/^[[:space:]]*Group Key:/ s/(OVER[[:space:]]+)w[0-9]+/\1(?)/g
|
||||
# Output
|
||||
/^[[:space:]]*Output:/ s/(OVER[[:space:]]+)w[0-9]+/\1(?)/g
|
||||
# end PG18 window ref normalization
|
||||
|
|
|
|||
|
|
@ -137,6 +137,7 @@ def initialize_db_for_cluster(pg_path, rel_data_path, settings, node_names):
|
|||
# --allow-group-access is used to ensure we set permissions on
|
||||
# private keys correctly
|
||||
"--allow-group-access",
|
||||
"--data-checksums",
|
||||
"--encoding",
|
||||
"UTF8",
|
||||
"--locale",
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ CITUS_ARBITRARY_TEST_DIR = "./tmp_citus_test"
|
|||
|
||||
MASTER = "master"
|
||||
# This should be updated when citus version changes
|
||||
MASTER_VERSION = "13.2"
|
||||
MASTER_VERSION = "14.0"
|
||||
|
||||
HOME = expanduser("~")
|
||||
|
||||
|
|
@ -194,6 +194,7 @@ class CitusUpgradeConfig(CitusBaseClusterConfig):
|
|||
self.new_settings = {"citus.enable_version_checks": "false"}
|
||||
self.user = SUPER_USER_NAME
|
||||
self.mixed_mode = arguments["--mixed"]
|
||||
self.minor_upgrade = arguments.get("--minor-upgrade", False)
|
||||
self.fixed_port = 57635
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -265,6 +265,15 @@ DEPS = {
|
|||
"subquery_in_where": TestDeps(
|
||||
"minimal_schedule", ["multi_behavioral_analytics_create_table"]
|
||||
),
|
||||
"subquery_in_targetlist": TestDeps(
|
||||
"minimal_schedule", ["multi_behavioral_analytics_create_table"]
|
||||
),
|
||||
"window_functions": TestDeps(
|
||||
"minimal_schedule", ["multi_behavioral_analytics_create_table"]
|
||||
),
|
||||
"multi_subquery_window_functions": TestDeps(
|
||||
"minimal_schedule", ["multi_behavioral_analytics_create_table"]
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ Options:
|
|||
--pgxsdir=<pgxsdir> Path to the PGXS directory(ex: ~/.pgenv/src/postgresql-11.3)
|
||||
--citus-old-version=<citus-old-version> Citus old version for local run(ex v8.0.0)
|
||||
--mixed Run the verification phase with one node not upgraded.
|
||||
--minor-upgrade Use minor version upgrade test schedules instead of major version schedules.
|
||||
"""
|
||||
|
||||
import multiprocessing
|
||||
|
|
@ -55,7 +56,14 @@ def run_citus_upgrade_tests(config, before_upgrade_schedule, after_upgrade_sched
|
|||
)
|
||||
|
||||
report_initial_version(config)
|
||||
|
||||
# Store the pre-upgrade GUCs and UDFs for minor version upgrades
|
||||
pre_upgrade = None
|
||||
if config.minor_upgrade:
|
||||
pre_upgrade = get_citus_catalog_info(config)
|
||||
|
||||
run_test_on_coordinator(config, before_upgrade_schedule)
|
||||
|
||||
remove_citus(config.pre_tar_path)
|
||||
if after_upgrade_schedule is None:
|
||||
return
|
||||
|
|
@ -66,9 +74,226 @@ def run_citus_upgrade_tests(config, before_upgrade_schedule, after_upgrade_sched
|
|||
run_alter_citus(config.bindir, config.mixed_mode, config)
|
||||
verify_upgrade(config, config.mixed_mode, config.node_name_to_ports.values())
|
||||
|
||||
# For minor version upgrades, verify that GUCs and UDFs do not have breaking changes
|
||||
breaking_changes = []
|
||||
if config.minor_upgrade:
|
||||
breaking_changes = compare_citus_catalog_info(config, pre_upgrade)
|
||||
|
||||
run_test_on_coordinator(config, after_upgrade_schedule)
|
||||
remove_citus(config.post_tar_path)
|
||||
|
||||
# Fail the test if there are any breaking changes
|
||||
if breaking_changes:
|
||||
common.eprint("\n=== BREAKING CHANGES DETECTED ===")
|
||||
for change in breaking_changes:
|
||||
common.eprint(f" - {change}")
|
||||
common.eprint("==================================\n")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_citus_catalog_info(config):
|
||||
results = {}
|
||||
# Store GUCs
|
||||
guc_results = utils.psql_capture(
|
||||
config.bindir,
|
||||
config.coordinator_port(),
|
||||
"SELECT name, boot_val FROM pg_settings WHERE name LIKE 'citus.%' ORDER BY name;",
|
||||
)
|
||||
|
||||
guc_lines = guc_results.decode("utf-8").strip().split("\n")
|
||||
results["gucs"] = {}
|
||||
for line in guc_lines[2:]: # Skip header lines
|
||||
name, boot_val = line.split("|")
|
||||
results["gucs"][name.strip()] = boot_val.strip()
|
||||
|
||||
# Store UDFs
|
||||
udf_results = utils.psql_capture(
|
||||
config.bindir,
|
||||
config.coordinator_port(),
|
||||
"""
|
||||
SELECT
|
||||
n.nspname AS schema_name,
|
||||
p.proname AS function_name,
|
||||
pg_get_function_arguments(p.oid) AS full_args,
|
||||
pg_get_function_result(p.oid) AS return_type
|
||||
FROM pg_proc p
|
||||
JOIN pg_namespace n ON n.oid = p.pronamespace
|
||||
JOIN pg_depend d ON d.objid = p.oid
|
||||
JOIN pg_extension e ON e.oid = d.refobjid
|
||||
WHERE e.extname = 'citus'
|
||||
AND d.deptype = 'e'
|
||||
ORDER BY schema_name, function_name, full_args;
|
||||
""",
|
||||
)
|
||||
|
||||
udf_lines = udf_results.decode("utf-8").strip().split("\n")
|
||||
results["udfs"] = {}
|
||||
for line in udf_lines[2:]: # Skip header lines
|
||||
schema_name, function_name, full_args, return_type = line.split("|")
|
||||
key = (schema_name.strip(), function_name.strip())
|
||||
signature = (full_args.strip(), return_type.strip())
|
||||
|
||||
if key not in results["udfs"]:
|
||||
results["udfs"][key] = set()
|
||||
results["udfs"][key].add(signature)
|
||||
|
||||
# Store types, exclude composite types (t.typrelid = 0) and
|
||||
# exclude auto-created array types
|
||||
# (t.typname LIKE '\_%' AND t.typelem <> 0)
|
||||
type_results = utils.psql_capture(
|
||||
config.bindir,
|
||||
config.coordinator_port(),
|
||||
"""
|
||||
SELECT n.nspname, t.typname, t.typtype
|
||||
FROM pg_type t
|
||||
JOIN pg_depend d ON d.objid = t.oid
|
||||
JOIN pg_extension e ON e.oid = d.refobjid
|
||||
JOIN pg_namespace n ON n.oid = t.typnamespace
|
||||
WHERE e.extname = 'citus'
|
||||
AND t.typrelid = 0
|
||||
AND NOT (t.typname LIKE '\\_%%' AND t.typelem <> 0)
|
||||
ORDER BY n.nspname, t.typname;
|
||||
""",
|
||||
)
|
||||
type_lines = type_results.decode("utf-8").strip().split("\n")
|
||||
results["types"] = {}
|
||||
|
||||
for line in type_lines[2:]: # Skip header lines
|
||||
nspname, typname, typtype = line.split("|")
|
||||
key = (nspname.strip(), typname.strip())
|
||||
results["types"][key] = typtype.strip()
|
||||
|
||||
# Store tables and views
|
||||
table_results = utils.psql_capture(
|
||||
config.bindir,
|
||||
config.coordinator_port(),
|
||||
"""
|
||||
SELECT n.nspname, c.relname, a.attname, t.typname
|
||||
FROM pg_class c
|
||||
JOIN pg_namespace n ON n.oid = c.relnamespace
|
||||
JOIN pg_attribute a ON a.attrelid = c.oid
|
||||
JOIN pg_type t ON t.oid = a.atttypid
|
||||
JOIN pg_depend d ON d.objid = c.oid
|
||||
JOIN pg_extension e ON e.oid = d.refobjid
|
||||
WHERE e.extname = 'citus'
|
||||
AND a.attnum > 0
|
||||
AND NOT a.attisdropped
|
||||
ORDER BY n.nspname, c.relname, a.attname;
|
||||
""",
|
||||
)
|
||||
|
||||
table_lines = table_results.decode("utf-8").strip().split("\n")
|
||||
results["tables"] = {}
|
||||
for line in table_lines[2:]: # Skip header lines
|
||||
nspname, relname, attname, typname = line.split("|")
|
||||
key = (nspname.strip(), relname.strip())
|
||||
|
||||
if key not in results["tables"]:
|
||||
results["tables"][key] = {}
|
||||
results["tables"][key][attname.strip()] = typname.strip()
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def compare_citus_catalog_info(config, pre_upgrade):
|
||||
post_upgrade = get_citus_catalog_info(config)
|
||||
breaking_changes = []
|
||||
|
||||
# Compare GUCs
|
||||
for name, boot_val in pre_upgrade["gucs"].items():
|
||||
if name not in post_upgrade["gucs"]:
|
||||
breaking_changes.append(f"GUC {name} was removed")
|
||||
elif post_upgrade["gucs"][name] != boot_val and name != "citus.version":
|
||||
breaking_changes.append(
|
||||
f"The default value of GUC {name} was changed from {boot_val} to {post_upgrade['gucs'][name]}"
|
||||
)
|
||||
|
||||
# Compare UDFs - check if any pre-upgrade signatures were removed
|
||||
for (schema_name, function_name), pre_signatures in pre_upgrade["udfs"].items():
|
||||
if (schema_name, function_name) not in post_upgrade["udfs"]:
|
||||
breaking_changes.append(
|
||||
f"UDF {schema_name}.{function_name} was completely removed"
|
||||
)
|
||||
else:
|
||||
post_signatures = post_upgrade["udfs"][(schema_name, function_name)]
|
||||
removed_signatures = pre_signatures - post_signatures
|
||||
|
||||
if removed_signatures:
|
||||
for full_args, return_type in removed_signatures:
|
||||
if not find_compatible_udf_signature(
|
||||
full_args, return_type, post_signatures
|
||||
):
|
||||
breaking_changes.append(
|
||||
f"UDF signature removed: {schema_name}.{function_name}({full_args}) RETURNS {return_type}"
|
||||
)
|
||||
|
||||
# Compare Types - check if any pre-upgrade types were removed or changed
|
||||
for (nspname, typname), typtype in pre_upgrade["types"].items():
|
||||
if (nspname, typname) not in post_upgrade["types"]:
|
||||
breaking_changes.append(f"Type {nspname}.{typname} was removed")
|
||||
elif post_upgrade["types"][(nspname, typname)] != typtype:
|
||||
breaking_changes.append(
|
||||
f"Type {nspname}.{typname} changed type from {typtype} to {post_upgrade['types'][(nspname, typname)]}"
|
||||
)
|
||||
|
||||
# Compare tables / views - check if any pre-upgrade tables or columns were removed or changed
|
||||
for (nspname, relname), columns in pre_upgrade["tables"].items():
|
||||
if (nspname, relname) not in post_upgrade["tables"]:
|
||||
breaking_changes.append(f"Table/view {nspname}.{relname} was removed")
|
||||
else:
|
||||
post_columns = post_upgrade["tables"][(nspname, relname)]
|
||||
|
||||
for col_name, col_type in columns.items():
|
||||
if col_name not in post_columns:
|
||||
breaking_changes.append(
|
||||
f"Column {col_name} in table/view {nspname}.{relname} was removed"
|
||||
)
|
||||
elif post_columns[col_name] != col_type:
|
||||
breaking_changes.append(
|
||||
f"Column {col_name} in table/view {nspname}.{relname} changed type from {col_type} to {post_columns[col_name]}"
|
||||
)
|
||||
|
||||
return breaking_changes
|
||||
|
||||
|
||||
def find_compatible_udf_signature(full_args, return_type, post_signatures):
|
||||
pre_args_list = [arg.strip() for arg in full_args.split(",") if arg.strip()]
|
||||
|
||||
for post_full_args, post_return_type in post_signatures:
|
||||
if post_return_type == return_type:
|
||||
post_args_list = [
|
||||
arg.strip() for arg in post_full_args.split(",") if arg.strip()
|
||||
]
|
||||
""" Here check if the function signatures are compatible, they are compatible if:
|
||||
# post_args_list has all the arguments of pre_args_list in the same order, but may have
|
||||
additional arguments with default values """
|
||||
pre_index = 0
|
||||
post_index = 0
|
||||
compatible = True
|
||||
while pre_index < len(pre_args_list) and post_index < len(post_args_list):
|
||||
if pre_args_list[pre_index] == post_args_list[post_index]:
|
||||
pre_index += 1
|
||||
else:
|
||||
# Check if the argument in post_args_list has a default value
|
||||
if "default" not in post_args_list[post_index].lower():
|
||||
compatible = False
|
||||
break
|
||||
post_index += 1
|
||||
if pre_index < len(pre_args_list):
|
||||
compatible = False
|
||||
continue
|
||||
|
||||
while post_index < len(post_args_list):
|
||||
if "default" not in post_args_list[post_index].lower():
|
||||
compatible = False
|
||||
break
|
||||
post_index += 1
|
||||
|
||||
if compatible:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def install_citus(tar_path):
|
||||
if tar_path:
|
||||
|
|
|
|||
|
|
@ -338,7 +338,9 @@ SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_passwor
|
|||
|
|
||||
(2 rows)
|
||||
|
||||
SET client_min_messages TO ERROR;
|
||||
ALTER ROLE new_role PASSWORD 'new_password';
|
||||
RESET client_min_messages;
|
||||
SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_password, workers.result = pg_authid.rolpassword AS password_is_same FROM run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'new_role'$$) workers, pg_authid WHERE pg_authid.rolname = 'new_role';
|
||||
worker_password | coord_password | password_is_same
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
|||
|
|
@ -379,6 +379,7 @@ ORDER BY indexname;
|
|||
|
||||
SELECT conname FROM pg_constraint
|
||||
WHERE conrelid = 'heap_\''tbl'::regclass
|
||||
AND contype <> 'n'
|
||||
ORDER BY conname;
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -416,6 +417,7 @@ ORDER BY indexname;
|
|||
|
||||
SELECT conname FROM pg_constraint
|
||||
WHERE conrelid = 'heap_\''tbl'::regclass
|
||||
AND contype <> 'n'
|
||||
ORDER BY conname;
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ SELECT con.conname
|
|||
FROM pg_catalog.pg_constraint con
|
||||
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
|
||||
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
|
||||
WHERE rel.relname = 'products';
|
||||
WHERE rel.relname = 'products' AND con.contype <> 'n';
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
products_pkey
|
||||
|
|
@ -27,7 +27,7 @@ SELECT con.conname
|
|||
FROM pg_catalog.pg_constraint con
|
||||
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
|
||||
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
|
||||
WHERE rel.relname = 'products_ref';
|
||||
WHERE rel.relname = 'products_ref' AND con.contype <> 'n';
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
products_ref_pkey2
|
||||
|
|
@ -41,7 +41,7 @@ SELECT con.conname
|
|||
FROM pg_catalog.pg_constraint con
|
||||
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
|
||||
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
|
||||
WHERE rel.relname LIKE 'very%';
|
||||
WHERE rel.relname LIKE 'very%' AND con.contype <> 'n';
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
verylonglonglonglonglonglonglonglonglonglonglonglonglonglo_pkey
|
||||
|
|
@ -55,7 +55,7 @@ SELECT con.conname
|
|||
FROM pg_catalog.pg_constraint con
|
||||
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
|
||||
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
|
||||
WHERE rel.relname = 'dist_partitioned_table';
|
||||
WHERE rel.relname = 'dist_partitioned_table' AND con.contype <> 'n';
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
dist_partitioned_table_pkey
|
||||
|
|
@ -68,7 +68,7 @@ SELECT con.conname
|
|||
FROM pg_catalog.pg_constraint con
|
||||
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
|
||||
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
|
||||
WHERE rel.relname = 'citus_local_table';
|
||||
WHERE rel.relname = 'citus_local_table' AND con.contype <> 'n';
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
citus_local_table_pkey
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ SELECT id, id, id, id, id,
|
|||
10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10
|
||||
(10 rows)
|
||||
|
||||
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1;
|
||||
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Sort (actual rows=10 loops=1)
|
||||
|
|
@ -66,7 +66,7 @@ EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM
|
|||
(11 rows)
|
||||
|
||||
SET citus.explain_all_tasks TO ON;
|
||||
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1;
|
||||
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Sort (actual rows=10 loops=1)
|
||||
|
|
|
|||
|
|
@ -635,7 +635,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'citus_local_table_4'::regclass;
|
|||
SELECT column_name_to_column('citus_local_table_4', 'a');
|
||||
column_name_to_column
|
||||
---------------------------------------------------------------------
|
||||
{VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
|
||||
{VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1 :varattnosyn 1 :location -1}
|
||||
(1 row)
|
||||
|
||||
SELECT master_update_shard_statistics(shardid)
|
||||
|
|
|
|||
|
|
@ -371,11 +371,11 @@ CREATE TABLE cas_1 (a INT UNIQUE);
|
|||
CREATE TABLE cas_par (a INT UNIQUE) PARTITION BY RANGE(a);
|
||||
CREATE TABLE cas_par_1 PARTITION OF cas_par FOR VALUES FROM (1) TO (4);
|
||||
CREATE TABLE cas_par_2 PARTITION OF cas_par FOR VALUES FROM (5) TO (8);
|
||||
ALTER TABLE cas_par_1 ADD CONSTRAINT fkey_cas_test_1 FOREIGN KEY (a) REFERENCES cas_1(a);
|
||||
ALTER TABLE cas_par_1 ADD CONSTRAINT fkey_cas_test_first FOREIGN KEY (a) REFERENCES cas_1(a);
|
||||
CREATE TABLE cas_par2 (a INT UNIQUE) PARTITION BY RANGE(a);
|
||||
CREATE TABLE cas_par2_1 PARTITION OF cas_par2 FOR VALUES FROM (1) TO (4);
|
||||
CREATE TABLE cas_par2_2 PARTITION OF cas_par2 FOR VALUES FROM (5) TO (8);
|
||||
ALTER TABLE cas_par2_1 ADD CONSTRAINT fkey_cas_test_2 FOREIGN KEY (a) REFERENCES cas_1(a);
|
||||
ALTER TABLE cas_par2_1 ADD CONSTRAINT fkey_cas_test_second FOREIGN KEY (a) REFERENCES cas_1(a);
|
||||
CREATE TABLE cas_par3 (a INT UNIQUE) PARTITION BY RANGE(a);
|
||||
CREATE TABLE cas_par3_1 PARTITION OF cas_par3 FOR VALUES FROM (1) TO (4);
|
||||
CREATE TABLE cas_par3_2 PARTITION OF cas_par3 FOR VALUES FROM (5) TO (8);
|
||||
|
|
@ -390,11 +390,11 @@ ERROR: cannot cascade operation via foreign keys as partition table citus_local
|
|||
SELECT citus_add_local_table_to_metadata('cas_par2', cascade_via_foreign_keys=>true);
|
||||
ERROR: cannot cascade operation via foreign keys as partition table citus_local_tables_mx.cas_par2_1 involved in a foreign key relationship that is not inherited from its parent table
|
||||
-- drop the foreign keys and establish them again using the parent table
|
||||
ALTER TABLE cas_par_1 DROP CONSTRAINT fkey_cas_test_1;
|
||||
ALTER TABLE cas_par2_1 DROP CONSTRAINT fkey_cas_test_2;
|
||||
ALTER TABLE cas_par ADD CONSTRAINT fkey_cas_test_1 FOREIGN KEY (a) REFERENCES cas_1(a);
|
||||
ALTER TABLE cas_par2 ADD CONSTRAINT fkey_cas_test_2 FOREIGN KEY (a) REFERENCES cas_1(a);
|
||||
ALTER TABLE cas_par3 ADD CONSTRAINT fkey_cas_test_3 FOREIGN KEY (a) REFERENCES cas_par(a);
|
||||
ALTER TABLE cas_par_1 DROP CONSTRAINT fkey_cas_test_first;
|
||||
ALTER TABLE cas_par2_1 DROP CONSTRAINT fkey_cas_test_second;
|
||||
ALTER TABLE cas_par ADD CONSTRAINT fkey_cas_test_first FOREIGN KEY (a) REFERENCES cas_1(a);
|
||||
ALTER TABLE cas_par2 ADD CONSTRAINT fkey_cas_test_second FOREIGN KEY (a) REFERENCES cas_1(a);
|
||||
ALTER TABLE cas_par3 ADD CONSTRAINT fkey_cas_test_third FOREIGN KEY (a) REFERENCES cas_par(a);
|
||||
-- this should error out as cascade_via_foreign_keys is not set to true
|
||||
SELECT citus_add_local_table_to_metadata('cas_par2');
|
||||
ERROR: relation citus_local_tables_mx.cas_par2 is involved in a foreign key relationship with another table
|
||||
|
|
@ -414,27 +414,29 @@ select inhrelid::regclass from pg_inherits where inhparent='cas_par'::regclass o
|
|||
(2 rows)
|
||||
|
||||
-- verify the fkeys + fkeys with shard ids are created
|
||||
select conname from pg_constraint where conname like 'fkey_cas_test%' order by conname;
|
||||
conname
|
||||
select conname from pg_constraint
|
||||
where conname like 'fkey_cas_test%' and conname not like '%_1' and conname not like '%_2'
|
||||
order by conname;
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
fkey_cas_test_1
|
||||
fkey_cas_test_1
|
||||
fkey_cas_test_1
|
||||
fkey_cas_test_1_1330008
|
||||
fkey_cas_test_1_1330008
|
||||
fkey_cas_test_1_1330008
|
||||
fkey_cas_test_2
|
||||
fkey_cas_test_2
|
||||
fkey_cas_test_2
|
||||
fkey_cas_test_2_1330006
|
||||
fkey_cas_test_2_1330006
|
||||
fkey_cas_test_2_1330006
|
||||
fkey_cas_test_3
|
||||
fkey_cas_test_3
|
||||
fkey_cas_test_3
|
||||
fkey_cas_test_3_1330013
|
||||
fkey_cas_test_3_1330013
|
||||
fkey_cas_test_3_1330013
|
||||
fkey_cas_test_first
|
||||
fkey_cas_test_first
|
||||
fkey_cas_test_first
|
||||
fkey_cas_test_first_1330008
|
||||
fkey_cas_test_first_1330008
|
||||
fkey_cas_test_first_1330008
|
||||
fkey_cas_test_second
|
||||
fkey_cas_test_second
|
||||
fkey_cas_test_second
|
||||
fkey_cas_test_second_1330006
|
||||
fkey_cas_test_second_1330006
|
||||
fkey_cas_test_second_1330006
|
||||
fkey_cas_test_third
|
||||
fkey_cas_test_third
|
||||
fkey_cas_test_third
|
||||
fkey_cas_test_third_1330013
|
||||
fkey_cas_test_third_1330013
|
||||
fkey_cas_test_third_1330013
|
||||
(18 rows)
|
||||
|
||||
-- when all partitions are converted, there should be 40 tables and indexes
|
||||
|
|
@ -457,18 +459,20 @@ SELECT count(*) FROM pg_class WHERE relname LIKE 'cas\_%' AND relnamespace IN
|
|||
(1 row)
|
||||
|
||||
-- verify that the shell foreign keys are created on the worker as well
|
||||
select conname from pg_constraint where conname like 'fkey_cas_test%' order by conname;
|
||||
conname
|
||||
select conname from pg_constraint
|
||||
where conname like 'fkey_cas_test%' and conname not like '%_1' and conname not like '%_2'
|
||||
order by conname;
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
fkey_cas_test_1
|
||||
fkey_cas_test_1
|
||||
fkey_cas_test_1
|
||||
fkey_cas_test_2
|
||||
fkey_cas_test_2
|
||||
fkey_cas_test_2
|
||||
fkey_cas_test_3
|
||||
fkey_cas_test_3
|
||||
fkey_cas_test_3
|
||||
fkey_cas_test_first
|
||||
fkey_cas_test_first
|
||||
fkey_cas_test_first
|
||||
fkey_cas_test_second
|
||||
fkey_cas_test_second
|
||||
fkey_cas_test_second
|
||||
fkey_cas_test_third
|
||||
fkey_cas_test_third
|
||||
fkey_cas_test_third
|
||||
(9 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
|
|
@ -494,18 +498,20 @@ select inhrelid::regclass from pg_inherits where inhparent='cas_par'::regclass o
|
|||
(2 rows)
|
||||
|
||||
-- verify that the foreign keys with shard ids are gone, due to undistribution
|
||||
select conname from pg_constraint where conname like 'fkey_cas_test%' order by conname;
|
||||
conname
|
||||
select conname from pg_constraint
|
||||
where conname like 'fkey_cas_test%' and conname not like '%_1' and conname not like '%_2'
|
||||
order by conname;
|
||||
conname
|
||||
---------------------------------------------------------------------
|
||||
fkey_cas_test_1
|
||||
fkey_cas_test_1
|
||||
fkey_cas_test_1
|
||||
fkey_cas_test_2
|
||||
fkey_cas_test_2
|
||||
fkey_cas_test_2
|
||||
fkey_cas_test_3
|
||||
fkey_cas_test_3
|
||||
fkey_cas_test_3
|
||||
fkey_cas_test_first
|
||||
fkey_cas_test_first
|
||||
fkey_cas_test_first
|
||||
fkey_cas_test_second
|
||||
fkey_cas_test_second
|
||||
fkey_cas_test_second
|
||||
fkey_cas_test_third
|
||||
fkey_cas_test_third
|
||||
fkey_cas_test_third
|
||||
(9 rows)
|
||||
|
||||
-- add a non-inherited fkey and verify it fails when trying to convert
|
||||
|
|
@ -769,8 +775,8 @@ SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition
|
|||
ORDER BY logicalrelid;
|
||||
logicalrelid | partmethod | partkey
|
||||
---------------------------------------------------------------------
|
||||
parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
|
||||
parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 5 :location -1}
|
||||
parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1 :varattnosyn 1 :location -1}
|
||||
parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1 :varattnosyn 5 :location -1}
|
||||
(2 rows)
|
||||
|
||||
-- some tests for view propagation on citus local tables
|
||||
|
|
|
|||
|
|
@ -160,32 +160,42 @@ SELECT pg_reload_conf();
|
|||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_child_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_dist_8970002 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_parent_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_ref_8970002 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8970002 | sensors_2020_01_01_8970002_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8970000 | sensors_8970000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(22 rows)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_child_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_dist_8970002 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_parent_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_ref_8970002 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(20 rows)
|
||||
|
||||
-- separating generated child FK constraints since PG18 changed their naming (3db61db4)
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
|
||||
|
|
@ -240,11 +250,22 @@ SELECT pg_reload_conf();
|
|||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -368,32 +389,41 @@ SELECT public.wait_for_resource_cleanup();
|
|||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_child_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_dist_8999004 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_parent_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_ref_8999004 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999004 | sensors_2020_01_01_8999004_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999000 | sensors_8999000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(22 rows)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_child_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_dist_8999004 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_parent_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_ref_8999004 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(20 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
|
||||
|
|
@ -448,32 +478,41 @@ SELECT public.wait_for_resource_cleanup();
|
|||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | sensors_2020_01_01_8999005_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999001 | sensors_8999001_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(22 rows)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(20 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
|
||||
|
|
@ -634,32 +673,41 @@ SELECT public.wait_for_resource_cleanup();
|
|||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_child_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_dist_8999104 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_parent_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_ref_8999104 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999104 | sensors_2020_01_01_8999104_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999100 | sensors_8999100_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(22 rows)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_child_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_dist_8999104 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_parent_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_ref_8999104 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(20 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
|
||||
|
|
@ -714,54 +762,61 @@ SELECT public.wait_for_resource_cleanup();
|
|||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | sensors_2020_01_01_8999005_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_child_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_dist_8999105 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_parent_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_ref_8999105 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999105 | sensors_2020_01_01_8999105_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999001 | sensors_8999001_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999101 | sensors_8999101_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(44 rows)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_child_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_dist_8999105 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_parent_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_ref_8999105 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(40 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
|
||||
|
|
|
|||
|
|
@ -152,32 +152,42 @@ SELECT pg_reload_conf();
|
|||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_child_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_dist_8970002 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_parent_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_ref_8970002 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8970002 | sensors_2020_01_01_8970002_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8970000 | sensors_8970000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(22 rows)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_child_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_dist_8970002 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_parent_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_child_to_ref_8970002 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8970002 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_8970000 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_news_8970003 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
|
||||
sensors_old_8970001 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(20 rows)
|
||||
|
||||
-- separating generated child FK constraints since PG18 changed their naming (3db61db4)
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
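The count query above pairs with the comment added earlier in this hunk: PG18 changed how the foreign keys that PostgreSQL itself clones onto partitions are named, so the expected output now counts those generated child constraints separately by name pattern instead of listing them with the explicitly created ones. The same split can also be read from the catalog rather than from names; a minimal sketch (the shard name is only an illustrative pick from the listing above, and assumes the shard is visible from the current connection):

SELECT conrelid::regclass AS partition_shard,
       conname,
       conparentid <> 0 AS cloned_from_parent_fk,  -- set when PostgreSQL propagated the FK onto this partition
       pg_get_constraintdef(oid) AS definition
FROM pg_constraint
WHERE contype = 'f'
  AND conrelid = 'sensors_2020_01_01_8999005'::regclass;  -- hypothetical pick; any partition shard works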
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
@@ -232,11 +242,22 @@ SELECT pg_reload_conf();
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
|
||||
---------------------------------------------------------------------
@@ -360,32 +381,41 @@ SELECT public.wait_for_resource_cleanup();
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_child_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_dist_8999004 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_parent_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_ref_8999004 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999004 | sensors_2020_01_01_8999004_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999000 | sensors_8999000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(22 rows)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_child_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_dist_8999004 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_parent_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_child_to_ref_8999004 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999004 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_8999000 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_news_8999006 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
|
||||
sensors_old_8999002 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(20 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
@@ -440,32 +470,41 @@ SELECT public.wait_for_resource_cleanup();
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | sensors_2020_01_01_8999005_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999001 | sensors_8999001_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(22 rows)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(20 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
@@ -626,32 +665,41 @@ SELECT public.wait_for_resource_cleanup();
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_child_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_dist_8999104 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_parent_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_ref_8999104 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999104 | sensors_2020_01_01_8999104_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999100 | sensors_8999100_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(22 rows)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_child_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_dist_8999104 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_parent_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_child_to_ref_8999104 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999104 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_8999100 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_news_8999106 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid)
|
||||
sensors_old_8999102 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(20 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef
@@ -706,54 +754,61 @@ SELECT public.wait_for_resource_cleanup();
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND fk."Constraint" NOT LIKE 'sensors%' AND fk."Constraint" NOT LIKE '%to\_parent%\_1'
|
||||
ORDER BY 1, 2;
|
||||
relname | Constraint | Definition
|
||||
relname | Constraint | Definition
|
||||
---------------------------------------------------------------------
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | sensors_2020_01_01_8999005_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_child_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_dist_8999105 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_parent_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_ref_8999105 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999105 | sensors_2020_01_01_8999105_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999001 | sensors_8999001_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999101 | sensors_8999101_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(44 rows)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_child_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_dist_8999105 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_parent_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_child_to_ref_8999105 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_2020_01_01_8999105 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_8999101 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_news_8999107 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid)
|
||||
sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid)
|
||||
sensors_old_8999103 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
|
||||
(40 rows)
|
||||
|
||||
SELECT count(*) AS generated_child_fk_constraints
|
||||
FROM pg_catalog.pg_class tbl
|
||||
JOIN public.table_fkeys fk on tbl.oid = fk.relid
|
||||
WHERE tbl.relname like '%_89%'
|
||||
AND (fk."Constraint" LIKE 'sensors%' OR fk."Constraint" LIKE '%to\_parent%\_1');
|
||||
generated_child_fk_constraints
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
|
||||
tablename | indexdef

@@ -10,6 +10,8 @@
-- If chunks get filtered by columnar, less rows are passed to WHERE
-- clause, so this function should return a lower number.
--
CREATE SCHEMA columnar_chunk_filtering;
SET search_path TO columnar_chunk_filtering, public;
CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS
$$
DECLARE
@@ -125,7 +127,7 @@ SELECT * FROM collation_chunk_filtering_test WHERE A > 'B';

CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR;
INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567);
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 123456;
QUERY PLAN
---------------------------------------------------------------------
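Only the EXPLAIN option list changes in these hunks: BUFFERS OFF is added wherever the test runs EXPLAIN ANALYZE. The likely reason (not stated in the diff itself) is that newer PostgreSQL releases turn buffer reporting on by default under EXPLAIN ANALYZE, and buffer counts are not stable enough to keep in expected output. A minimal before/after illustration on the table created above:

-- old form: on a server that defaults BUFFERS to on under ANALYZE, this would
-- now print extra "Buffers: shared hit=..." lines and break the expected file
EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT * FROM simple_chunk_filtering WHERE i > 123456;
-- new form: buffer reporting is pinned off, so the plan text stays identical
-- across server versions
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 123456;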
@@ -138,7 +140,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
(6 rows)

SET columnar.enable_qual_pushdown = false;
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 123456;
QUERY PLAN
---------------------------------------------------------------------
@@ -153,7 +155,7 @@ SET columnar.enable_qual_pushdown TO DEFAULT;
TRUNCATE simple_chunk_filtering;
INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000);
COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null';
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 180000;
QUERY PLAN
---------------------------------------------------------------------
@@ -168,7 +170,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
DROP TABLE simple_chunk_filtering;
CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar;
INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i;
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000;
QUERY PLAN
---------------------------------------------------------------------
@@ -181,7 +183,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 5
(7 rows)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
QUERY PLAN
---------------------------------------------------------------------
@@ -197,7 +199,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
-- make next tests faster
TRUNCATE multi_column_chunk_filtering;
INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5);
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
QUERY PLAN
---------------------------------------------------------------------
@@ -208,7 +210,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 1
(5 rows)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000;
QUERY PLAN
---------------------------------------------------------------------
@@ -220,7 +222,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 0
(6 rows)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT FROM multi_column_chunk_filtering WHERE a > 50000;
QUERY PLAN
---------------------------------------------------------------------
@@ -231,7 +233,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 1
(5 rows)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT FROM multi_column_chunk_filtering;
QUERY PLAN
---------------------------------------------------------------------
@@ -242,7 +244,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
BEGIN;
ALTER TABLE multi_column_chunk_filtering DROP COLUMN a;
ALTER TABLE multi_column_chunk_filtering DROP COLUMN b;
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM multi_column_chunk_filtering;
QUERY PLAN
---------------------------------------------------------------------
@@ -253,7 +255,7 @@ BEGIN;
ROLLBACK;
CREATE TABLE another_columnar_table(x int, y int) USING columnar;
INSERT INTO another_columnar_table SELECT generate_series(0,5);
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1;
QUERY PLAN
---------------------------------------------------------------------
@@ -364,7 +366,7 @@ set enable_mergejoin=false;
set enable_hashjoin=false;
set enable_material=false;
-- test different kinds of expressions
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, coltest WHERE
id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0;
QUERY PLAN
@@ -391,7 +393,7 @@ SELECT * FROM r1, coltest WHERE
(3 rows)

-- test equivalence classes
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND
id4 = id5 AND id5 = id6 AND id6 = id7;
@@ -561,7 +563,7 @@ set columnar.max_custom_scan_paths to default;
set columnar.planner_debug_level to default;
-- test more complex parameterization
set columnar.planner_debug_level = 'notice';
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, r2, r3, coltest WHERE
id1 = id2 AND id2 = id3 AND id3 = id AND
n1 > x1 AND n2 > x2 AND n3 > x3;
@@ -613,7 +615,7 @@ SELECT * FROM r1, r2, r3, coltest WHERE
(3 rows)

-- test partitioning parameterization
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, coltest_part WHERE
id1 = id AND n1 > x1;
QUERY PLAN
@@ -680,7 +682,7 @@ DETAIL: unparameterized; 0 clauses pushed down
---------------------------------------------------------------------
(0 rows)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM coltest c1 WHERE ceil(x1) > 4222;
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
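The NOTICE and HINT above spell out the shape a WHERE clause must have before columnar will use it for chunk filtering: one side has to be a bare Var of the scanned relation and the other an expression that does not reference that relation. A small contrast on the same test table (illustrative queries, not part of the expected file):

-- pushable: bare column compared against a constant, matches 'Var <op> Expr'
SELECT count(*) FROM coltest WHERE x1 > 4222;
-- not pushable: the column is wrapped in ceil(), so the left side is an Expr
-- referencing the scanned relation and every chunk group must be read
SELECT count(*) FROM coltest WHERE ceil(x1) > 4222;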
@@ -832,7 +834,7 @@ BEGIN;
COMMIT;
SET columnar.max_custom_scan_paths TO 50;
SET columnar.qual_pushdown_correlation_threshold TO 0.0;
EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
@@ -855,7 +857,7 @@ DETAIL: unparameterized; 1 clauses pushed down
180912
(1 row)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
@@ -878,7 +880,7 @@ DETAIL: unparameterized; 1 clauses pushed down
375268
(1 row)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b;
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
@@ -894,7 +896,7 @@ DETAIL: unparameterized; 0 clauses pushed down
Columnar Projected Columns: a, b
(5 rows)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down
@@ -917,7 +919,7 @@ DETAIL: unparameterized; 1 clauses pushed down
1099459500
(1 row)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
@@ -949,7 +951,7 @@ DETAIL: unparameterized; 0 clauses pushed down
20000100000
(1 row)

EXPLAIN (analyze on, costs off, timing off, summary off)
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel
|
|
@ -977,8 +979,9 @@ DETAIL: unparameterized; 1 clauses pushed down
|
|||
(1 row)
|
||||
|
||||
SET hash_mem_multiplier = 1.0;
|
||||
\pset footer off
|
||||
SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test where
|
||||
(
|
||||
a > random()
|
||||
|
|
@ -1015,8 +1018,8 @@ CONTEXT: PL/pgSQL function columnar_test_helpers.explain_with_pg16_subplan_form
|
|||
-> Materialize (actual rows=100 loops=199)
|
||||
-> Custom Scan (ColumnarScan) on pushdown_test pushdown_test_1 (actual rows=199 loops=1)
|
||||
Columnar Projected Columns: a
|
||||
(11 rows)
|
||||
|
||||
\pset footer on
|
||||
RESET hash_mem_multiplier;
|
||||
SELECT sum(a) FROM pushdown_test where
|
||||
(
|
||||
|
|
@ -1043,7 +1046,7 @@ DETAIL: unparameterized; 1 clauses pushed down
|
|||
|
||||
create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as
|
||||
$$ BEGIN RETURN 1+arg; END; $$;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
|
||||
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
|
||||
HINT: Var must only reference this rel, and Expr must not reference this rel
|
||||
|
|
@ -1096,7 +1099,7 @@ BEGIN;
|
|||
INSERT INTO pushdown_test VALUES(7, 'USA');
|
||||
INSERT INTO pushdown_test VALUES(8, 'ZW');
|
||||
END;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW');
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -1123,7 +1126,7 @@ BEGIN
|
|||
return 'AL';
|
||||
END;
|
||||
$$;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -1141,4 +1144,6 @@ SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());
|
|||
8 | ZW
|
||||
(3 rows)
|
||||
|
||||
DROP TABLE pushdown_test;
|
||||
SET client_min_messages TO WARNING;
|
||||
DROP SCHEMA columnar_chunk_filtering CASCADE;
|
||||
RESET client_min_messages;
|
||||
|
|
|
|||
|
|
@ -10,6 +10,8 @@
|
|||
-- If chunks get filtered by columnar, less rows are passed to WHERE
|
||||
-- clause, so this function should return a lower number.
|
||||
--
|
||||
CREATE SCHEMA columnar_chunk_filtering;
|
||||
SET search_path TO columnar_chunk_filtering, public;
|
||||
CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS
|
||||
$$
|
||||
DECLARE
|
||||
|
|
@ -125,7 +127,7 @@ SELECT * FROM collation_chunk_filtering_test WHERE A > 'B';
|
|||
|
||||
CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR;
|
||||
INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567);
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM simple_chunk_filtering WHERE i > 123456;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -138,7 +140,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
|
|||
(6 rows)
|
||||
|
||||
SET columnar.enable_qual_pushdown = false;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM simple_chunk_filtering WHERE i > 123456;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -153,7 +155,7 @@ SET columnar.enable_qual_pushdown TO DEFAULT;
|
|||
TRUNCATE simple_chunk_filtering;
|
||||
INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000);
|
||||
COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null';
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM simple_chunk_filtering WHERE i > 180000;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -168,7 +170,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
|
|||
DROP TABLE simple_chunk_filtering;
|
||||
CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar;
|
||||
INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -181,7 +183,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
|
|||
Columnar Chunk Groups Removed by Filter: 5
|
||||
(7 rows)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -197,7 +199,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
|
|||
-- make next tests faster
|
||||
TRUNCATE multi_column_chunk_filtering;
|
||||
INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5);
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -208,7 +210,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
|
|||
Columnar Chunk Groups Removed by Filter: 1
|
||||
(5 rows)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -220,7 +222,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
|
|||
Columnar Chunk Groups Removed by Filter: 0
|
||||
(6 rows)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT FROM multi_column_chunk_filtering WHERE a > 50000;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -231,7 +233,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
|
|||
Columnar Chunk Groups Removed by Filter: 1
|
||||
(5 rows)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT FROM multi_column_chunk_filtering;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -242,7 +244,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
|
|||
BEGIN;
|
||||
ALTER TABLE multi_column_chunk_filtering DROP COLUMN a;
|
||||
ALTER TABLE multi_column_chunk_filtering DROP COLUMN b;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM multi_column_chunk_filtering;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -253,7 +255,7 @@ BEGIN;
|
|||
ROLLBACK;
|
||||
CREATE TABLE another_columnar_table(x int, y int) USING columnar;
|
||||
INSERT INTO another_columnar_table SELECT generate_series(0,5);
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -364,7 +366,7 @@ set enable_mergejoin=false;
|
|||
set enable_hashjoin=false;
|
||||
set enable_material=false;
|
||||
-- test different kinds of expressions
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM r1, coltest WHERE
|
||||
id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0;
|
||||
QUERY PLAN
|
||||
|
|
@ -391,7 +393,7 @@ SELECT * FROM r1, coltest WHERE
|
|||
(3 rows)
|
||||
|
||||
-- test equivalence classes
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
|
||||
id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND
|
||||
id4 = id5 AND id5 = id6 AND id6 = id7;
|
||||
|
|
@ -561,7 +563,7 @@ set columnar.max_custom_scan_paths to default;
|
|||
set columnar.planner_debug_level to default;
|
||||
-- test more complex parameterization
|
||||
set columnar.planner_debug_level = 'notice';
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM r1, r2, r3, coltest WHERE
|
||||
id1 = id2 AND id2 = id3 AND id3 = id AND
|
||||
n1 > x1 AND n2 > x2 AND n3 > x3;
|
||||
|
|
@ -613,7 +615,7 @@ SELECT * FROM r1, r2, r3, coltest WHERE
|
|||
(3 rows)
|
||||
|
||||
-- test partitioning parameterization
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM r1, coltest_part WHERE
|
||||
id1 = id AND n1 > x1;
|
||||
QUERY PLAN
|
||||
|
|
@ -680,7 +682,7 @@ DETAIL: unparameterized; 0 clauses pushed down
|
|||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM coltest c1 WHERE ceil(x1) > 4222;
|
||||
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
|
||||
HINT: Var must only reference this rel, and Expr must not reference this rel
|
||||
|
|
@ -832,7 +834,7 @@ BEGIN;
|
|||
COMMIT;
|
||||
SET columnar.max_custom_scan_paths TO 50;
|
||||
SET columnar.qual_pushdown_correlation_threshold TO 0.0;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
|
||||
NOTICE: columnar planner: adding CustomScan path for pushdown_test
|
||||
DETAIL: unparameterized; 1 clauses pushed down
|
||||
|
|
@ -855,7 +857,7 @@ DETAIL: unparameterized; 1 clauses pushed down
|
|||
180912
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
|
||||
NOTICE: columnar planner: adding CustomScan path for pushdown_test
|
||||
DETAIL: unparameterized; 1 clauses pushed down
|
||||
|
|
@ -878,7 +880,7 @@ DETAIL: unparameterized; 1 clauses pushed down
|
|||
375268
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b;
|
||||
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
|
||||
HINT: Var must only reference this rel, and Expr must not reference this rel
|
||||
|
|
@ -894,7 +896,7 @@ DETAIL: unparameterized; 0 clauses pushed down
|
|||
Columnar Projected Columns: a, b
|
||||
(5 rows)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
|
||||
NOTICE: columnar planner: adding CustomScan path for pushdown_test
|
||||
DETAIL: unparameterized; 1 clauses pushed down
|
||||
|
|
@ -917,7 +919,7 @@ DETAIL: unparameterized; 1 clauses pushed down
|
|||
1099459500
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
|
||||
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
|
||||
HINT: Var must only reference this rel, and Expr must not reference this rel
|
||||
|
|
@ -949,7 +951,7 @@ DETAIL: unparameterized; 0 clauses pushed down
|
|||
20000100000
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
|
||||
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
|
||||
HINT: Var must only reference this rel, and Expr must not reference this rel
|
||||
|
|
@ -977,8 +979,9 @@ DETAIL: unparameterized; 1 clauses pushed down
|
|||
(1 row)
|
||||
|
||||
SET hash_mem_multiplier = 1.0;
|
||||
\pset footer off
|
||||
SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test where
|
||||
(
|
||||
a > random()
|
||||
|
|
@ -1015,8 +1018,8 @@ CONTEXT: PL/pgSQL function columnar_test_helpers.explain_with_pg16_subplan_form
|
|||
-> Materialize (actual rows=100 loops=199)
|
||||
-> Custom Scan (ColumnarScan) on pushdown_test pushdown_test_1 (actual rows=199 loops=1)
|
||||
Columnar Projected Columns: a
|
||||
(11 rows)
|
||||
|
||||
\pset footer on
|
||||
RESET hash_mem_multiplier;
|
||||
SELECT sum(a) FROM pushdown_test where
|
||||
(
|
||||
|
|
@ -1043,7 +1046,7 @@ DETAIL: unparameterized; 1 clauses pushed down
|
|||
|
||||
create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as
|
||||
$$ BEGIN RETURN 1+arg; END; $$;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
|
||||
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
|
||||
HINT: Var must only reference this rel, and Expr must not reference this rel
|
||||
|
|
@ -1096,7 +1099,7 @@ BEGIN;
|
|||
INSERT INTO pushdown_test VALUES(7, 'USA');
|
||||
INSERT INTO pushdown_test VALUES(8, 'ZW');
|
||||
END;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW');
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -1123,7 +1126,7 @@ BEGIN
|
|||
return 'AL';
|
||||
END;
|
||||
$$;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -1141,4 +1144,6 @@ SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());
|
|||
8 | ZW
|
||||
(3 rows)
|
||||
|
||||
DROP TABLE pushdown_test;
|
||||
SET client_min_messages TO WARNING;
|
||||
DROP SCHEMA columnar_chunk_filtering CASCADE;
|
||||
RESET client_min_messages;
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
CREATE TABLE test_cursor (a int, b int) USING columnar;
|
||||
INSERT INTO test_cursor SELECT i, j FROM generate_series(0, 100)i, generate_series(100, 200)j;
|
||||
-- A case where the WHERE clause might filter out some chunks
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a = 25;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a = 25;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (ColumnarScan) on test_cursor (actual rows=101 loops=1)
|
||||
|
|
@ -107,7 +107,7 @@ UPDATE test_cursor SET a = 8000 WHERE CURRENT OF a_25;
|
|||
ERROR: UPDATE and CTID scans not supported for ColumnarScan
|
||||
COMMIT;
|
||||
-- A case where the WHERE clause doesn't filter out any chunks
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a > 25;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a > 25;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (ColumnarScan) on test_cursor (actual rows=7575 loops=1)
|
||||
|
|
|
|||
|
|
@ -176,12 +176,15 @@ SELECT pg_total_relation_size('columnar_table_b_idx') * 5 <
|
|||
(1 row)
|
||||
|
||||
-- can't use index scan due to partial index boundaries
|
||||
EXPLAIN (COSTS OFF) SELECT b FROM columnar_table WHERE b = 30000;
|
||||
QUERY PLAN
|
||||
SELECT NOT jsonb_path_exists(
|
||||
columnar_test_helpers._plan_json('SELECT b FROM columnar_table WHERE b = 30000'),
|
||||
-- Regex matches any index-based scan: "Index Scan", "Index Only Scan", "Bitmap Index Scan".
|
||||
'$[*].Plan.** ? (@."Node Type" like_regex "^(Index|Bitmap Index).*Scan$")'
|
||||
) AS uses_no_index_scan; -- expect: t
|
||||
uses_no_index_scan
|
||||
---------------------------------------------------------------------
|
||||
Seq Scan on columnar_table
|
||||
Filter: (b = 30000)
|
||||
(2 rows)
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- can use index scan
|
||||
EXPLAIN (COSTS OFF) SELECT b FROM columnar_table WHERE b = 30001;
|
||||
|
|
|
|||
|
|
@ -204,18 +204,24 @@ $$
|
|||
t
|
||||
(1 row)
|
||||
|
||||
SELECT columnar_test_helpers.uses_custom_scan (
|
||||
$$
|
||||
SELECT a FROM full_correlated WHERE a=0 OR a=5;
|
||||
$$
|
||||
);
|
||||
BEGIN;
|
||||
SET LOCAL enable_indexscan TO 'OFF';
|
||||
SET LOCAL enable_bitmapscan TO 'OFF';
|
||||
SELECT columnar_test_helpers.uses_custom_scan (
|
||||
$$
|
||||
SELECT a FROM full_correlated WHERE a=0 OR a=5;
|
||||
$$
|
||||
);
|
||||
uses_custom_scan
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ROLLBACK;
|
||||
BEGIN;
|
||||
SET LOCAL columnar.enable_custom_scan TO 'OFF';
|
||||
SET LOCAL enable_indexscan TO 'OFF';
|
||||
SET LOCAL enable_bitmapscan TO 'OFF';
|
||||
SELECT columnar_test_helpers.uses_seq_scan (
|
||||
$$
|
||||
SELECT a FROM full_correlated WHERE a=0 OR a=5;
|
||||
|
|
@ -579,7 +585,7 @@ CREATE INDEX correlated_idx ON correlated(x);
|
|||
CREATE INDEX uncorrelated_idx ON uncorrelated(x);
|
||||
ANALYZE correlated, uncorrelated;
|
||||
-- should choose chunk group filtering; selective and correlated
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM correlated WHERE x = 78910;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -598,11 +604,11 @@ SELECT * FROM correlated WHERE x = 78910;
|
|||
(1 row)
|
||||
|
||||
-- should choose index scan; selective but uncorrelated
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze off, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM uncorrelated WHERE x = 78910;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Index Scan using uncorrelated_idx on uncorrelated (actual rows=1 loops=1)
|
||||
Index Scan using uncorrelated_idx on uncorrelated
|
||||
Index Cond: (x = 78910)
|
||||
(2 rows)
|
||||
|
||||
|
|
|
|||
|
|
@ -204,18 +204,24 @@ $$
|
|||
t
|
||||
(1 row)
|
||||
|
||||
SELECT columnar_test_helpers.uses_custom_scan (
|
||||
$$
|
||||
SELECT a FROM full_correlated WHERE a=0 OR a=5;
|
||||
$$
|
||||
);
|
||||
BEGIN;
|
||||
SET LOCAL enable_indexscan TO 'OFF';
|
||||
SET LOCAL enable_bitmapscan TO 'OFF';
|
||||
SELECT columnar_test_helpers.uses_custom_scan (
|
||||
$$
|
||||
SELECT a FROM full_correlated WHERE a=0 OR a=5;
|
||||
$$
|
||||
);
|
||||
uses_custom_scan
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ROLLBACK;
|
||||
BEGIN;
|
||||
SET LOCAL columnar.enable_custom_scan TO 'OFF';
|
||||
SET LOCAL enable_indexscan TO 'OFF';
|
||||
SET LOCAL enable_bitmapscan TO 'OFF';
|
||||
SELECT columnar_test_helpers.uses_seq_scan (
|
||||
$$
|
||||
SELECT a FROM full_correlated WHERE a=0 OR a=5;
|
||||
|
|
@ -583,7 +589,7 @@ CREATE INDEX correlated_idx ON correlated(x);
|
|||
CREATE INDEX uncorrelated_idx ON uncorrelated(x);
|
||||
ANALYZE correlated, uncorrelated;
|
||||
-- should choose chunk group filtering; selective and correlated
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM correlated WHERE x = 78910;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -602,11 +608,11 @@ SELECT * FROM correlated WHERE x = 78910;
|
|||
(1 row)
|
||||
|
||||
-- should choose index scan; selective but uncorrelated
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off)
|
||||
EXPLAIN (analyze off, costs off, timing off, summary off, BUFFERS OFF)
|
||||
SELECT * FROM uncorrelated WHERE x = 78910;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Index Scan using uncorrelated_idx on uncorrelated (actual rows=1 loops=1)
|
||||
Index Scan using uncorrelated_idx on uncorrelated
|
||||
Index Cond: (x = 78910)
|
||||
(2 rows)
|
||||
|
||||
|
|
|
|||
|
|
@ -146,3 +146,11 @@ BEGIN
|
|||
RETURN NEXT;
|
||||
END LOOP;
|
||||
END; $$ language plpgsql;
|
||||
CREATE OR REPLACE FUNCTION _plan_json(q text)
|
||||
RETURNS jsonb
|
||||
LANGUAGE plpgsql AS $$
|
||||
DECLARE j jsonb;
|
||||
BEGIN
|
||||
EXECUTE format('EXPLAIN (FORMAT JSON, COSTS OFF, ANALYZE OFF) %s', q) INTO j;
|
||||
RETURN j;
|
||||
END $$;
|
||||
|
|
|
|||
|
|
@ -219,7 +219,7 @@ EXPLAIN (COSTS OFF) EXECUTE p0;
|
|||
(2 rows)
|
||||
|
||||
EXECUTE p0;
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p0;
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p0;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Insert on t (actual rows=0 loops=1)
|
||||
|
|
@ -252,7 +252,7 @@ EXPLAIN (COSTS OFF) EXECUTE p1(16);
|
|||
(2 rows)
|
||||
|
||||
EXECUTE p1(16);
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p1(20);
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p1(20);
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Insert on t (actual rows=0 loops=1)
|
||||
|
|
@ -289,7 +289,7 @@ EXPLAIN (COSTS OFF) EXECUTE p2(30, 40);
|
|||
(2 rows)
|
||||
|
||||
EXECUTE p2(30, 40);
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p2(50, 60);
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p2(50, 60);
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Insert on t (actual rows=0 loops=1)
|
||||
|
|
@ -342,7 +342,7 @@ EXECUTE p3;
|
|||
8 | 8
|
||||
(2 rows)
|
||||
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p3;
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p3;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (ColumnarScan) on t (actual rows=2 loops=1)
|
||||
|
|
@ -397,7 +397,7 @@ EXECUTE p5(16);
|
|||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p5(9);
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p5(9);
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (ColumnarScan) on t (actual rows=2 loops=1)
|
||||
|
|
@ -453,7 +453,7 @@ EXECUTE p6(30, 40);
|
|||
31 | 41
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p6(50, 60);
|
||||
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p6(50, 60);
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (ColumnarScan) on t (actual rows=1 loops=1)
|
||||
|
|
|
|||
|
|
@ -140,7 +140,7 @@ ROLLBACK;
|
|||
-- INSERT..SELECT with re-partitioning in EXPLAIN ANALYZE after local execution
|
||||
BEGIN;
|
||||
INSERT INTO test VALUES (0,1000);
|
||||
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) INSERT INTO test (x, y) SELECT y, x FROM test;
|
||||
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) INSERT INTO test (x, y) SELECT y, x FROM test;
|
||||
ERROR: EXPLAIN ANALYZE is currently not supported for INSERT ... SELECT commands with repartitioning
|
||||
ROLLBACK;
|
||||
-- DDL connects to locahost
|
||||
|
|
|
|||
|
|
@ -473,25 +473,19 @@ SELECT create_distributed_table('color', 'color_id');
|
|||
(1 row)
|
||||
|
||||
INSERT INTO color(color_name) VALUES ('Blue');
|
||||
\d+ color
|
||||
Table "generated_identities.color"
|
||||
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
|
||||
SELECT pg_get_serial_sequence('color', 'color_id');
|
||||
pg_get_serial_sequence
|
||||
---------------------------------------------------------------------
|
||||
color_id | bigint | | not null | generated always as identity | plain | |
|
||||
color_name | character varying | | not null | | extended | |
|
||||
Indexes:
|
||||
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)
|
||||
generated_identities.color_color_id_seq
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SET search_path TO generated_identities;
|
||||
\d+ color
|
||||
Table "generated_identities.color"
|
||||
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
|
||||
SELECT pg_get_serial_sequence('color', 'color_id');
|
||||
pg_get_serial_sequence
|
||||
---------------------------------------------------------------------
|
||||
color_id | bigint | | not null | generated always as identity | plain | |
|
||||
color_name | character varying | | not null | | extended | |
|
||||
Indexes:
|
||||
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)
|
||||
generated_identities.color_color_id_seq
|
||||
(1 row)
|
||||
|
||||
INSERT INTO color(color_name) VALUES ('Red');
|
||||
-- alter sequence .. restart
|
||||
|
|
|
|||
|
|
@ -1,5 +0,0 @@
|
|||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1-begin s1-upd-ins s2-result s1-commit s2-result
|
||||
setup failed: ERROR: MERGE is not supported on PG versions below 15
|
||||
CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1-begin s1-upd-ins s2-begin s2-update s1-commit s2-commit s1-result s2-result
|
||||
setup failed: ERROR: MERGE is not supported on PG versions below 15
|
||||
CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE
|
||||
|
|
@ -312,22 +312,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20;
|
|||
Filter: (age = 20)
|
||||
(8 rows)
|
||||
|
||||
EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20;
|
||||
QUERY PLAN
|
||||
\pset footer off
|
||||
select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20');
|
||||
explain_filter
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
|
||||
Task Count: 1
|
||||
Tuple data received from nodes: 14 bytes
|
||||
Custom Scan (Citus Adaptive) (actual rows=N loops=N)
|
||||
Task Count: N
|
||||
Tuple data received from nodes: N bytes
|
||||
Tasks Shown: All
|
||||
-> Task
|
||||
Tuple data received from node: 14 bytes
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=1 loops=1)
|
||||
Index Cond: (key = 1)
|
||||
Filter: (age = 20)
|
||||
(10 rows)
|
||||
Tuple data received from node: N bytes
|
||||
Node: host=localhost port=N dbname=regression
|
||||
-> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=N loops=N)
|
||||
Index Cond: (key = N)
|
||||
Filter: (age = N)
|
||||
|
||||
EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF)
|
||||
\pset footer on
|
||||
EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)
|
||||
WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table)
|
||||
SELECT 1 FROM r WHERE z < 3;
|
||||
QUERY PLAN
|
||||
|
|
@ -368,21 +369,22 @@ EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20;
|
|||
Filter: (age = 20)
|
||||
(9 rows)
|
||||
|
||||
EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20;
|
||||
QUERY PLAN
|
||||
\pset footer off
|
||||
select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20');
|
||||
explain_filter
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
|
||||
Task Count: 1
|
||||
Custom Scan (Citus Adaptive) (actual rows=N loops=N)
|
||||
Task Count: N
|
||||
Tasks Shown: All
|
||||
-> Task
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Delete on distributed_table_1470001 distributed_table (actual rows=0 loops=1)
|
||||
-> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=1 loops=1)
|
||||
Index Cond: (key = 1)
|
||||
Filter: (age = 20)
|
||||
Trigger for constraint second_distributed_table_key_fkey_1470005: calls=1
|
||||
(10 rows)
|
||||
Node: host=localhost port=N dbname=regression
|
||||
-> Delete on distributed_table_1470001 distributed_table (actual rows=N loops=N)
|
||||
-> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=N loops=N)
|
||||
Index Cond: (key = N)
|
||||
Filter: (age = N)
|
||||
Trigger for constraint second_distributed_table_key_fkey_1470005: calls=N
|
||||
|
||||
\pset footer on
|
||||
-- show that EXPLAIN ANALYZE deleted the row and cascades deletes
|
||||
SELECT * FROM distributed_table WHERE key = 1 AND age = 20 ORDER BY 1,2,3;
|
||||
NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((key OPERATOR(pg_catalog.=) 1) AND (age OPERATOR(pg_catalog.=) 20)) ORDER BY key, value, age
|
||||
|
|
|
|||
|
|
@ -250,22 +250,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20;
|
|||
Filter: (age = 20)
|
||||
(8 rows)
|
||||
|
||||
EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20;
|
||||
QUERY PLAN
|
||||
\pset footer off
|
||||
select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20');
|
||||
explain_filter
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
|
||||
Task Count: 1
|
||||
Tuple data received from nodes: 14 bytes
|
||||
Custom Scan (Citus Adaptive) (actual rows=N loops=N)
|
||||
Task Count: N
|
||||
Tuple data received from nodes: N bytes
|
||||
Tasks Shown: All
|
||||
-> Task
|
||||
Tuple data received from node: 14 bytes
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Index Scan using distributed_table_pkey_1500001 on distributed_table_1500001 distributed_table (actual rows=1 loops=1)
|
||||
Index Cond: (key = 1)
|
||||
Filter: (age = 20)
|
||||
(10 rows)
|
||||
Tuple data received from node: N bytes
|
||||
Node: host=localhost port=N dbname=regression
|
||||
-> Index Scan using distributed_table_pkey_1500001 on distributed_table_1500001 distributed_table (actual rows=N loops=N)
|
||||
Index Cond: (key = N)
|
||||
Filter: (age = N)
|
||||
|
||||
EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF)
|
||||
\pset footer on
|
||||
EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)
|
||||
WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table)
|
||||
SELECT 1 FROM r WHERE z < 3;
|
||||
QUERY PLAN
|
||||
|
|
@ -306,20 +307,21 @@ EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20;
|
|||
Filter: (age = 20)
|
||||
(9 rows)
|
||||
|
||||
EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20;
|
||||
QUERY PLAN
|
||||
\pset footer off
|
||||
select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20');
|
||||
explain_filter
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
|
||||
Task Count: 1
|
||||
Custom Scan (Citus Adaptive) (actual rows=N loops=N)
|
||||
Task Count: N
|
||||
Tasks Shown: All
|
||||
-> Task
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Delete on distributed_table_1500001 distributed_table (actual rows=0 loops=1)
|
||||
-> Index Scan using distributed_table_pkey_1500001 on distributed_table_1500001 distributed_table (actual rows=1 loops=1)
|
||||
Index Cond: (key = 1)
|
||||
Filter: (age = 20)
|
||||
(9 rows)
|
||||
Node: host=localhost port=N dbname=regression
|
||||
-> Delete on distributed_table_1500001 distributed_table (actual rows=N loops=N)
|
||||
-> Index Scan using distributed_table_pkey_1500001 on distributed_table_1500001 distributed_table (actual rows=N loops=N)
|
||||
Index Cond: (key = N)
|
||||
Filter: (age = N)
|
||||
|
||||
\pset footer on
|
||||
-- show that EXPLAIN ANALYZE deleted the row
|
||||
SELECT * FROM distributed_table WHERE key = 1 AND age = 20 ORDER BY 1,2,3;
|
||||
NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((key OPERATOR(pg_catalog.=) 1) AND (age OPERATOR(pg_catalog.=) 20)) ORDER BY key, value, age
|
||||
|
|
|
|||
|
|
@ -2019,7 +2019,7 @@ DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_table_xxxxxxx t US
|
|||
DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_table_xxxxxxx t USING (SELECT intermediate_result.id, intermediate_result.some_number FROM read_intermediate_result('merge_into_XXX_4000079'::text, 'binary'::citus_copy_format) intermediate_result(id integer, some_number integer)) s ON (t.id OPERATOR(pg_catalog.=) s.some_number) WHEN NOT MATCHED THEN INSERT (id, name) VALUES (s.some_number, 'parag'::text)>
|
||||
DEBUG: Execute MERGE task list
|
||||
-- let's verify if data inserted to second shard of target.
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM target_table;
|
||||
EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM target_table;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
|
||||
|
|
@ -2535,7 +2535,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
|
|||
-- single shard query given source_json is filtered and Postgres is smart to pushdown
|
||||
-- filter to the target_json as well
|
||||
SELECT public.coordinator_plan($Q$
|
||||
EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda
|
||||
EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda
|
||||
USING (SELECT * FROM source_json WHERE id = 1) sdn
|
||||
ON sda.id = sdn.id
|
||||
WHEN NOT matched THEN
|
||||
|
|
@ -2564,7 +2564,7 @@ SELECT * FROM target_json ORDER BY 1;
|
|||
--SELECT * FROM target_json ORDER BY 1;
|
||||
-- join for source_json is happening at a different place
|
||||
SELECT public.coordinator_plan($Q$
|
||||
EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda
|
||||
EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda
|
||||
USING source_json s1 LEFT JOIN (SELECT * FROM source_json) s2 USING(z)
|
||||
ON sda.id = s1.id AND s1.id = s2.id
|
||||
WHEN NOT matched THEN
|
||||
|
|
@ -2589,7 +2589,7 @@ SELECT * FROM target_json ORDER BY 1;
|
|||
|
||||
-- update JSON column
|
||||
SELECT public.coordinator_plan($Q$
|
||||
EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda
|
||||
EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda
|
||||
USING source_json sdn
|
||||
ON sda.id = sdn.id
|
||||
WHEN matched THEN
|
||||
|
|
|
|||
|
|
@ -193,13 +193,148 @@ SQL function "compare_data" statement 2
|
|||
|
||||
(1 row)
|
||||
|
||||
---- https://github.com/citusdata/citus/issues/8180 ----
|
||||
CREATE TABLE dist_1 (a int, b int, c int);
|
||||
CREATE TABLE dist_2 (a int, b int, c int);
|
||||
CREATE TABLE dist_different_order_1 (b int, a int, c int);
|
||||
SELECT create_distributed_table('dist_1', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('dist_2', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('dist_different_order_1', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
MERGE INTO dist_1
|
||||
USING dist_2
|
||||
ON (dist_1.a = dist_2.b)
|
||||
WHEN MATCHED THEN UPDATE SET b = dist_2.b;
|
||||
MERGE INTO dist_1
|
||||
USING dist_1 src
|
||||
ON (dist_1.a = src.b)
|
||||
WHEN MATCHED THEN UPDATE SET b = src.b;
|
||||
MERGE INTO dist_different_order_1
|
||||
USING dist_1
|
||||
ON (dist_different_order_1.a = dist_1.b)
|
||||
WHEN MATCHED THEN UPDATE SET b = dist_1.b;
|
||||
CREATE TABLE dist_1_cast (a int, b int);
|
||||
CREATE TABLE dist_2_cast (a int, b numeric);
|
||||
SELECT create_distributed_table('dist_1_cast', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('dist_2_cast', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
MERGE INTO dist_1_cast
|
||||
USING dist_2_cast
|
||||
ON (dist_1_cast.a = dist_2_cast.b)
|
||||
WHEN MATCHED THEN UPDATE SET b = dist_2_cast.b;
|
||||
ERROR: In the MERGE ON clause, there is a datatype mismatch between target's distribution column and the expression originating from the source.
|
||||
DETAIL: If the types are different, Citus uses different hash functions for the two column types, which might lead to incorrect repartitioning of the result data
|
||||
MERGE INTO dist_1_cast
|
||||
USING (SELECT a, b::int as b FROM dist_2_cast) dist_2_cast
|
||||
ON (dist_1_cast.a = dist_2_cast.b)
|
||||
WHEN MATCHED THEN UPDATE SET b = dist_2_cast.b;
|
||||
-- a more sophisticated example
|
||||
CREATE TABLE dist_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb);
|
||||
CREATE TABLE dist_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int);
|
||||
CREATE TABLE local_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb);
|
||||
CREATE TABLE local_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int);
|
||||
SELECT create_distributed_table('dist_source', 'tstamp_col');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('dist_target', 'int_col');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col)
|
||||
SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval,
|
||||
i,
|
||||
ARRAY[i::text, (i+1)::text, (i+2)::text],
|
||||
'source_' || i,
|
||||
('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb
|
||||
FROM generate_series(1001, 2000) i;
|
||||
INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col)
|
||||
SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval,
|
||||
i,
|
||||
ARRAY[i::text, (i+1)::text, (i+2)::text],
|
||||
'source_' || i,
|
||||
('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb
|
||||
FROM generate_series(901, 1000) i;
|
||||
INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col)
|
||||
SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval,
|
||||
i,
|
||||
ARRAY[(i-1)::text, (i)::text, (i+1)::text],
|
||||
'source_' || i,
|
||||
('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb
|
||||
FROM generate_series(1501, 2000) i;
|
||||
INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col)
|
||||
SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval,
|
||||
i,
|
||||
ARRAY[(i-1)::text, (i)::text, (i+1)::text],
|
||||
'source_' || i-1,
|
||||
('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb
|
||||
FROM generate_series(1401, 1500) i;
|
||||
INSERT INTO local_source SELECT * FROM dist_source;
|
||||
INSERT INTO local_target SELECT * FROM dist_target;
|
||||
-- execute the query on distributed tables
|
||||
MERGE INTO dist_target target_alias
|
||||
USING dist_source source_alias
|
||||
ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col)
|
||||
WHEN MATCHED THEN UPDATE SET
|
||||
tstamp_col = source_alias.tstamp_col + interval '3 day',
|
||||
text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col),
|
||||
json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb,
|
||||
text_col = source_alias.json_col->>'a'
|
||||
WHEN NOT MATCHED THEN
|
||||
INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col );
|
||||
-- execute the same query on local tables, everything is the same except table names behind the aliases
|
||||
MERGE INTO local_target target_alias
|
||||
USING local_source source_alias
|
||||
ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col)
|
||||
WHEN MATCHED THEN UPDATE SET
|
||||
tstamp_col = source_alias.tstamp_col + interval '3 day',
|
||||
text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col),
|
||||
json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb,
|
||||
text_col = source_alias.json_col->>'a'
|
||||
WHEN NOT MATCHED THEN
|
||||
INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col );
|
||||
-- compare both targets
|
||||
SELECT COUNT(*) = 0 AS targets_match
|
||||
FROM (
|
||||
SELECT * FROM dist_target
|
||||
EXCEPT
|
||||
SELECT * FROM local_target
|
||||
UNION ALL
|
||||
SELECT * FROM local_target
|
||||
EXCEPT
|
||||
SELECT * FROM dist_target
|
||||
) q;
|
||||
targets_match
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SET client_min_messages TO WARNING;
|
||||
DROP SCHEMA merge_repartition2_schema CASCADE;
|
||||
NOTICE: drop cascades to 8 other objects
|
||||
DETAIL: drop cascades to table pg_target
|
||||
drop cascades to table pg_source
|
||||
drop cascades to function cleanup_data()
|
||||
drop cascades to function setup_data()
|
||||
drop cascades to function check_data(text,text,text,text)
|
||||
drop cascades to function compare_data()
|
||||
drop cascades to table citus_target
|
||||
drop cascades to table citus_source
|
||||
|
|
|
|||
|
|
@ -1287,11 +1287,17 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
|||
SET application_name to 'citus_internal gpid=10000000001';
|
||||
-- with an ugly trick, update the vartype of table from int to bigint
|
||||
-- so that making two tables colocated fails
|
||||
-- include varnullingrels for PG16
|
||||
-- include varnullingrels for PG16+
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
|
||||
\gset
|
||||
\if :server_version_ge_16
|
||||
-- include varreturningtype for PG18+
|
||||
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18
|
||||
\gset
|
||||
\if :server_version_ge_18
|
||||
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varreturningtype 0 :varnoold 1 :varoattno 1 :location -1}'
|
||||
WHERE logicalrelid = 'test_2'::regclass;
|
||||
\elif :server_version_ge_16
|
||||
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
|
||||
WHERE logicalrelid = 'test_2'::regclass;
|
||||
\else
|
||||
|
|
|
|||
|
|
@ -394,9 +394,9 @@ DEBUG: Wrapping relation "mat_view_on_part_dist" "foo" to a subquery
|
|||
DEBUG: generating subplan XXX_1 for subquery SELECT a FROM mixed_relkind_tests.mat_view_on_part_dist foo WHERE true
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE mixed_relkind_tests.partitioned_distributed_table SET a = foo.a FROM (SELECT foo_1.a, NULL::integer AS b FROM (SELECT intermediate_result.a FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) foo_1) foo WHERE (foo.a OPERATOR(pg_catalog.=) partitioned_distributed_table.a)
|
||||
UPDATE partitioned_distributed_table SET a = foo.a FROM partitioned_distributed_table AS foo WHERE foo.a < partitioned_distributed_table.a;
|
||||
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
|
||||
ERROR: modifying the partition value of rows is not allowed
|
||||
UPDATE partitioned_distributed_table SET a = foo.a FROM distributed_table AS foo WHERE foo.a < partitioned_distributed_table.a;
|
||||
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
|
||||
ERROR: modifying the partition value of rows is not allowed
|
||||
-- should work
|
||||
UPDATE partitioned_distributed_table SET a = foo.a FROM partitioned_distributed_table AS foo WHERE foo.a = partitioned_distributed_table.a;
|
||||
UPDATE partitioned_distributed_table SET a = foo.a FROM view_on_part_dist AS foo WHERE foo.a = partitioned_distributed_table.a;
|
||||
|
|
@ -633,11 +633,13 @@ $Q$);
|
|||
(4 rows)
|
||||
|
||||
-- pull to coordinator WINDOW
|
||||
select public.explain_filter('
|
||||
SELECT public.coordinator_plan($Q$
|
||||
EXPLAIN (COSTS OFF)
|
||||
SELECT a, COUNT(*) OVER (PARTITION BY a+1) FROM partitioned_distributed_table ORDER BY 1,2;
|
||||
$Q$);
|
||||
coordinator_plan
|
||||
$Q$)
|
||||
', true);
|
||||
explain_filter
|
||||
---------------------------------------------------------------------
|
||||
Sort
|
||||
Sort Key: remote_scan.a, (count(*) OVER (?))
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -4,7 +4,7 @@
|
|||
SET citus.next_shard_id TO 520000;
|
||||
SET citus.coordinator_aggregation_strategy TO 'disabled';
|
||||
SELECT run_command_on_master_and_workers($r$
|
||||
CREATE OR REPLACE FUNCTION array_sort (ANYARRAY)
|
||||
CREATE OR REPLACE FUNCTION array_sort_citus (ANYARRAY)
|
||||
RETURNS ANYARRAY LANGUAGE SQL
|
||||
AS $$
|
||||
SELECT ARRAY(SELECT unnest($1) ORDER BY 1)
|
||||
|
|
@ -30,9 +30,9 @@ ERROR: array_agg with order by is unsupported
|
|||
SELECT array_agg(distinct l_orderkey ORDER BY l_orderkey) FROM lineitem;
|
||||
ERROR: array_agg with order by is unsupported
|
||||
-- Check array_agg() for different data types and LIMIT clauses
|
||||
SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey
|
||||
SELECT array_sort_citus(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey
|
||||
ORDER BY l_orderkey LIMIT 10;
|
||||
array_sort
|
||||
array_sort_citus
|
||||
---------------------------------------------------------------------
|
||||
{2132,15635,24027,63700,67310,155190}
|
||||
{106170}
|
||||
|
|
@ -46,9 +46,9 @@ SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey
|
|||
{88362,89414,169544}
|
||||
(10 rows)
|
||||
|
||||
SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey
|
||||
SELECT array_sort_citus(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey
|
||||
ORDER BY l_orderkey LIMIT 10;
|
||||
array_sort
|
||||
array_sort_citus
|
||||
---------------------------------------------------------------------
|
||||
{13309.60,21168.23,22824.48,28955.64,45983.16,49620.16}
|
||||
{44694.46}
|
||||
|
|
@ -62,9 +62,9 @@ SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey
|
|||
{9681.24,17554.68,30875.02}
|
||||
(10 rows)
|
||||
|
||||
SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey
|
||||
SELECT array_sort_citus(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey
|
||||
ORDER BY l_orderkey LIMIT 10;
|
||||
array_sort
|
||||
array_sort_citus
|
||||
---------------------------------------------------------------------
|
||||
{01-29-1996,01-30-1996,03-13-1996,03-30-1996,04-12-1996,04-21-1996}
|
||||
{01-28-1997}
|
||||
|
|
@ -78,9 +78,9 @@ SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey
|
|||
{10-09-1998,10-23-1998,10-30-1998}
|
||||
(10 rows)
|
||||
|
||||
SELECT array_sort(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey
|
||||
SELECT array_sort_citus(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey
|
||||
ORDER BY l_orderkey LIMIT 10;
|
||||
array_sort
|
||||
array_sort_citus
|
||||
---------------------------------------------------------------------
|
||||
{"AIR ","FOB ","MAIL ","MAIL ","REG AIR ","TRUCK "}
|
||||
{"RAIL "}
|
||||
|
|
@ -105,10 +105,10 @@ SELECT array_length(array_agg(l_orderkey), 1) FROM lineitem;
|
|||
-- shards and contain different aggregates, filter clauses and other complex
|
||||
-- expressions. Note that the l_orderkey ranges are such that the matching rows
|
||||
-- lie in different shards.
|
||||
SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(array_agg(l_orderkey)) FROM lineitem
|
||||
SELECT l_quantity, count(*), avg(l_extendedprice), array_sort_citus(array_agg(l_orderkey)) FROM lineitem
|
||||
WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500
|
||||
GROUP BY l_quantity ORDER BY l_quantity;
|
||||
l_quantity | count | avg | array_sort
|
||||
l_quantity | count | avg | array_sort_citus
|
||||
---------------------------------------------------------------------
|
||||
1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476}
|
||||
2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476}
|
||||
|
|
@ -116,7 +116,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(array_agg(l_orderk
|
|||
4.00 | 19 | 5929.7136842105263158 | {5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985,9091,9120,9281,9347,9382,9440,9473}
|
||||
(4 rows)
|
||||
|
||||
SELECT l_quantity, array_sort(array_agg(extract (month FROM o_orderdate))) AS my_month
|
||||
SELECT l_quantity, array_sort_citus(array_agg(extract (month FROM o_orderdate))) AS my_month
|
||||
FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5
|
||||
AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
|
||||
l_quantity | my_month
|
||||
|
|
@ -127,10 +127,10 @@ SELECT l_quantity, array_sort(array_agg(extract (month FROM o_orderdate))) AS my
|
|||
4.00 | {1,1,1,2,2,2,5,5,6,6,6,6,8,9,10,10,11,11,12}
|
||||
(4 rows)
|
||||
|
||||
SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE l_quantity < 5
|
||||
SELECT l_quantity, array_sort_citus(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE l_quantity < 5
|
||||
AND octet_length(l_comment) + octet_length('randomtext'::text) > 40
|
||||
AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
|
||||
l_quantity | array_sort
|
||||
l_quantity | array_sort_citus
|
||||
---------------------------------------------------------------------
|
||||
1.00 | {11269,11397,11713,11715,11973,18317,18445}
|
||||
2.00 | {11847,18061,18247,18953}
|
||||
|
|
@ -139,17 +139,17 @@ SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE
|
|||
(4 rows)
|
||||
|
||||
-- Check that we can execute array_agg() with an expression containing NULL values
|
||||
SELECT array_sort(array_agg(case when l_quantity > 20 then l_quantity else NULL end))
|
||||
SELECT array_sort_citus(array_agg(case when l_quantity > 20 then l_quantity else NULL end))
|
||||
FROM lineitem WHERE l_orderkey < 10;
|
||||
array_sort
|
||||
array_sort_citus
|
||||
---------------------------------------------------------------------
|
||||
{24.00,26.00,26.00,27.00,28.00,28.00,28.00,30.00,32.00,35.00,36.00,37.00,38.00,38.00,45.00,46.00,49.00,50.00,NULL,NULL,NULL,NULL,NULL,NULL,NULL}
|
||||
(1 row)
|
||||
|
||||
-- Check that we return NULL in case there are no input rows to array_agg()
|
||||
SELECT array_sort(array_agg(l_orderkey))
|
||||
SELECT array_sort_citus(array_agg(l_orderkey))
|
||||
FROM lineitem WHERE l_orderkey < 0;
|
||||
array_sort
|
||||
array_sort_citus
|
||||
---------------------------------------------------------------------
|
||||
{}
|
||||
(1 row)
|
||||
|
|
|
|||
|
|
@ -170,7 +170,7 @@ SELECT * FROM composite_type_partitioned_table WHERE id = 123;
|
|||
123 | (123,456)
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE)
|
||||
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
|
||||
INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type);
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
@ -212,7 +212,7 @@ $cf$);
|
|||
(1 row)
|
||||
|
||||
INSERT INTO composite_type_partitioned_table VALUES (456, '(456, 678)'::other_composite_type);
|
||||
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE)
|
||||
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
|
||||
INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_composite_type);
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff