mirror of https://github.com/citusdata/citus.git
Compare commits
44 Commits
SHA1
51ef251535
5efbc758ef
452b6a2212
26ae4b8fb3
64db74c051
4f2a563df3
90a2fb3a67
0488697ff3
f06ae0c106
0bbfbf3c36
2e06e62476
5294df1602
781960d16b
4e54d1f0be
270a18ca06
71d049025d
d6f20392b8
5417fffc70
c2c0b97a5b
cf3018bd5a
b926fe8114
84e43424fc
d2181aec7f
8a1c0ae821
ee66ca06a8
9b19e41e46
10be12e4be
006f6aceaf
ebe70adc92
b9e4364acc
53ec5abb75
ecaa0cda6d
a8e7c2cb09
b7ae596fe8
6f4324623c
d5db0adc17
099523452e
af448da1a7
acccad9879
77947da17c
7d56c25e28
eba70af7a2
3f33390f45
7b51f3eee2
@@ -6,7 +6,7 @@ orbs:
 parameters:
   image_suffix:
     type: string
-    default: '-v0eef34d'
+    default: '-vcc4855a'
   pg13_version:
     type: string
     default: '13.8'
@@ -15,10 +15,10 @@ parameters:
     default: '14.5'
   pg15_version:
     type: string
-    default: '15beta4'
+    default: '15rc2'
   upgrade_pg_versions:
     type: string
-    default: '13.8-14.5-15beta4'
+    default: '13.8-14.5-15rc2'
   style_checker_tools_version:
     type: string
     default: '0.8.18'
CHANGELOG.md (161 additions)
@@ -1,3 +1,164 @@
+### citus v11.1.3 (October 14, 2022) ###
+
+* Adds support for PostgreSQL 15.0
+
+* Fixes a bug in `ALTER EXTENSION citus UPDATE`
+
+* Fixes a bug that causes a crash with empty/null password
+
+* Fixes a bug that causes not retaining trigger enable/disable settings when
+  re-creating them on shards
+
+* Fixes a bug that prevents retaining columnar table options after a
+  table-rewrite
+
+* Raises memory limits in columnar from 256MB to 1GB for reads and writes
+
+### citus v11.1.2 (September 30, 2022) ###
+
+* Adds support for PostgreSQL 15rc1
+
+* Disallows having `ON DELETE/UPDATE SET DEFAULT` actions on columns that
+  default to sequences
+
+* Fixes a bug that might cause inserting incorrect `DEFAULT` values when
+  applying foreign key actions
+
+* Fixes a performance issue related to shard-moves by creating replica
+  identities before copying shards
+
+* Improves logging during shard-splits and resource cleanup
+
+* Makes sure to reuse connections for shard-splits and logical replication
+
+* Makes sure to try dropping replication slots a few more times after a failure
+  at the end of the shard-split
+
+### citus v11.1.1 (September 16, 2022) ###
+
+* Fixes a bug that prevents `create_distributed_table_concurrently()` working
+  on an empty node
+
+### citus v11.1.0 (September 15, 2022) ###
+
+* Adds support for PostgreSQL 15beta4
+
+* Adds ability to run shard rebalancer in the background
+
+* Adds `create_distributed_table_concurrently()` UDF to distribute tables
+  without interrupting the application
+
+* Adds `citus_split_shard_by_split_points()` UDF that allows
+  splitting a shard to specified set of nodes without blocking writes
+  and based on given split points
+
+* Adds support for non-blocking tenant isolation
+
+* Adds support for isolating tenants that use partitioned tables
+  or columnar tables
+
+* Separates columnar table access method into a separate logical extension
+
+* Adds support for online replication in `replicate_reference_tables()`
+
+* Improves performance of blocking shard moves
+
+* Improves non-blocking shard moves with a faster custom copy logic
+
+* Creates all foreign keys quickly at the end of a shard move
+
+* Limits `get_rebalance_progress()` to show shards in moving state
+
+* Makes `citus_move_shard_placement()` idempotent if shard already exists
+  on target node
+
+* Shows `citus_copy_shard_placement()` progress in `get_rebalance_progress()`
+
+* Supports changing CPU priorities for backends and shard moves
+
+* Adds the GUC `citus.allow_unsafe_constraints` to allow unique/exclusion/
+  primary key constraints without distribution column
+
+* Introduces GUC `citus.skip_constraint_validation`
+
+* Introduces `citus_locks` view
+
+* Improves `citus_tables` view by showing local tables added to metadata
+
+* Improves columnar table access method by moving old catalog tables into
+  an internal schema and introduces more secure & informative views based
+  on them
+
+* Adds support for `GRANT/REVOKE` on aggregates
+
+* Adds support for `NULLS NOT DISTINCT` clauses for indexes for PG15+
+
+* Adds support for setting relation options for columnar tables using
+  `ALTER TABLE`
+
+* Adds support for unlogged distributed sequences
+
+* Removes `do_repair` option from `citus_copy_shard_placement()`
+
+* Removes deprecated re-partitioning functions like
+  `worker_hash_partition_table()`
+
+* Drops support for isolating tenants that use replicated tables
+
+* Checks existence of the shards before insert, delete, and update
+
+* Hides tables owned by extensions from `citus_tables` and `citus_shards`
+
+* Propagates `VACUUM` and `ANALYZE` to worker nodes
+
+* Makes non-partitioned table size calculation quicker
+
+* Improves `create_distributed_table()` by creating new colocation entries when
+  using `colocate_with => 'none'`
+
+* Ensures that `SELECT .. FOR UPDATE` opens a transaction block when used in
+  a function call
+
+* Prevents a segfault by disallowing usage of SQL functions referencing a
+  distributed table
+
+* Prevents creating a new colocation entry when replicating reference tables
+
+* Fixes a bug in query escaping in `undistribute_table()` and
+  `alter_distributed_table()`
+
+* Fixes a bug preventing the usage of `isolate_tenant_to_new_shard()` with text
+  column
+
+* Fixes a bug that may cause `GRANT` to propagate within `CREATE EXTENSION`
+
+* Fixes a bug that causes incorrectly marking `metadatasynced` flag for
+  coordinator
+
+* Fixes a bug that may prevent Citus from creating function in transaction
+  block properly
+
+* Fixes a bug that prevents promoting read-replicas as primaries
+
+* Fixes a bug that prevents setting colocation group of a partitioned
+  distributed table to `none`
+
+* Fixes a bug that prevents using `AUTO` option for `VACUUM (INDEX_CLEANUP)`
+  operation
+
+* Fixes a segfault in `citus_copy_shard_placement()`
+
+* Fixes an issue that can cause logical reference table replication to fail
+
+* Fixes schema name qualification for `RENAME SEQUENCE` statement
+
+* Fixes several small memory leaks
+
+* Fixes the transaction timestamp column of the `get_current_transaction_id()`
+  on coordinator
+
+* Maps any unused parameters to a generic type in prepared statements
+
 ### citus v10.2.8 (August 19, 2022) ###
 
 * Fixes compilation warning caused by latest upgrade script changes
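The 11.1.0 entries above introduce `create_distributed_table_concurrently()` and `citus_split_shard_by_split_points()`. As a rough, editor-added usage sketch only (the table, column, shard ID, and node IDs below are hypothetical; the shard ID would normally be looked up in `pg_dist_shard`):

    -- distribute an existing table without blocking writes on it
    SELECT create_distributed_table_concurrently('events', 'tenant_id');

    -- split one shard at the given hash-range boundary onto two nodes,
    -- again without blocking writes
    SELECT citus_split_shard_by_split_points(
        102008,                          -- shard id from pg_dist_shard
        ARRAY['0'],                      -- split points
        ARRAY[1, 2],                     -- target node ids
        shard_transfer_mode := 'auto');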
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 11.1devel.
+# Generated by GNU Autoconf 2.69 for Citus 11.1.3.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='Citus'
 PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='11.1devel'
-PACKAGE_STRING='Citus 11.1devel'
+PACKAGE_VERSION='11.1.3'
+PACKAGE_STRING='Citus 11.1.3'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''
 
@@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures Citus 11.1devel to adapt to many kinds of systems.
+\`configure' configures Citus 11.1.3 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1324,7 +1324,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Citus 11.1devel:";;
+     short | recursive ) echo "Configuration of Citus 11.1.3:";;
    esac
   cat <<\_ACEOF
 
@@ -1429,7 +1429,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-Citus configure 11.1devel
+Citus configure 11.1.3
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by Citus $as_me 11.1devel, which was
+It was created by Citus $as_me 11.1.3, which was
 generated by GNU Autoconf 2.69. Invocation command line was
 
   $ $0 $@
@@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Citus $as_me 11.1devel, which was
+This file was extended by Citus $as_me 11.1.3, which was
 generated by GNU Autoconf 2.69. Invocation command line was
 
   CONFIG_FILES = $CONFIG_FILES
@@ -5455,7 +5455,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-Citus config.status 11.1devel
+Citus config.status 11.1.3
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
@@ -5,7 +5,7 @@
 # everyone needing autoconf installed, the resulting files are checked
 # into the SCM.
 
-AC_INIT([Citus], [11.1devel])
+AC_INIT([Citus], [11.1.3])
 AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
 
 # we'll need sed and awk for some of the version commands
@@ -1650,6 +1650,9 @@ create_estate_for_relation(Relation rel)
 
 /*
  * DatumToBytea serializes a datum into a bytea value.
+ *
+ * Since we don't want to limit datum size to RSIZE_MAX unnecessarily,
+ * we use memcpy instead of memcpy_s several places in this function.
  */
 static bytea *
 DatumToBytea(Datum value, Form_pg_attribute attrForm)
@@ -1666,19 +1669,16 @@ DatumToBytea(Datum value, Form_pg_attribute attrForm)
             Datum tmp;
             store_att_byval(&tmp, value, attrForm->attlen);
 
-            memcpy_s(VARDATA(result), datumLength + VARHDRSZ,
-                     &tmp, attrForm->attlen);
+            memcpy(VARDATA(result), &tmp, attrForm->attlen); /* IGNORE-BANNED */
         }
         else
         {
-            memcpy_s(VARDATA(result), datumLength + VARHDRSZ,
-                     DatumGetPointer(value), attrForm->attlen);
+            memcpy(VARDATA(result), DatumGetPointer(value), attrForm->attlen); /* IGNORE-BANNED */
         }
     }
     else
     {
-        memcpy_s(VARDATA(result), datumLength + VARHDRSZ,
-                 DatumGetPointer(value), datumLength);
+        memcpy(VARDATA(result), DatumGetPointer(value), datumLength); /* IGNORE-BANNED */
     }
 
     return result;
@@ -1697,8 +1697,12 @@ ByteaToDatum(bytea *bytes, Form_pg_attribute attrForm)
      * after the byteaDatum is freed.
      */
     char *binaryDataCopy = palloc0(VARSIZE_ANY_EXHDR(bytes));
-    memcpy_s(binaryDataCopy, VARSIZE_ANY_EXHDR(bytes),
-             VARDATA_ANY(bytes), VARSIZE_ANY_EXHDR(bytes));
+
+    /*
+     * We use IGNORE-BANNED here since we don't want to limit datum size to
+     * RSIZE_MAX unnecessarily.
+     */
+    memcpy(binaryDataCopy, VARDATA_ANY(bytes), VARSIZE_ANY_EXHDR(bytes)); /* IGNORE-BANNED */
 
     return fetch_att(binaryDataCopy, attrForm->attbyval, attrForm->attlen);
 }
@@ -739,7 +739,9 @@ columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
      */
     ColumnarWriteState *writeState = columnar_init_write_state(relation,
                                                                 RelationGetDescr(relation),
+                                                                slot->tts_tableOid,
                                                                 GetCurrentSubTransactionId());
 
     MemoryContext oldContext = MemoryContextSwitchTo(ColumnarWritePerTupleContext(
                                                          writeState));
@@ -781,8 +783,14 @@ columnar_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 {
     CheckCitusColumnarVersion(ERROR);
 
+    /*
+     * The callback to .multi_insert is table_multi_insert() and this is only used for the COPY
+     * command, so slot[i]->tts_tableoid will always be equal to relation->id. Thus, we can send
+     * RelationGetRelid(relation) as the tupSlotTableOid
+     */
     ColumnarWriteState *writeState = columnar_init_write_state(relation,
                                                                 RelationGetDescr(relation),
+                                                                RelationGetRelid(relation),
                                                                 GetCurrentSubTransactionId());
 
     ColumnarCheckLogicalReplication(relation);
@@ -2568,8 +2576,13 @@ detoast_values(TupleDesc tupleDesc, Datum *orig_values, bool *isnull)
         if (values == orig_values)
         {
             values = palloc(sizeof(Datum) * natts);
-            memcpy_s(values, sizeof(Datum) * natts,
-                     orig_values, sizeof(Datum) * natts);
+
+            /*
+             * We use IGNORE-BANNED here since we don't want to limit
+             * size of the buffer that holds the datum array to RSIZE_MAX
+             * unnecessarily.
+             */
+            memcpy(values, orig_values, sizeof(Datum) * natts); /* IGNORE-BANNED */
         }
 
         /* will be freed when per-tuple context is reset */
@@ -531,6 +531,9 @@ SerializeBoolArray(bool *boolArray, uint32 boolArrayLength)
 /*
  * SerializeSingleDatum serializes the given datum value and appends it to the
  * provided string info buffer.
+ *
+ * Since we don't want to limit datum buffer size to RSIZE_MAX unnecessarily,
+ * we use memcpy instead of memcpy_s several places in this function.
  */
 static void
 SerializeSingleDatum(StringInfo datumBuffer, Datum datum, bool datumTypeByValue,
@@ -552,15 +555,13 @@ SerializeSingleDatum(StringInfo datumBuffer, Datum datum, bool datumTypeByValue,
         }
         else
         {
-            memcpy_s(currentDatumDataPointer, datumBuffer->maxlen - datumBuffer->len,
-                     DatumGetPointer(datum), datumTypeLength);
+            memcpy(currentDatumDataPointer, DatumGetPointer(datum), datumTypeLength); /* IGNORE-BANNED */
         }
     }
     else
     {
         Assert(!datumTypeByValue);
-        memcpy_s(currentDatumDataPointer, datumBuffer->maxlen - datumBuffer->len,
-                 DatumGetPointer(datum), datumLength);
+        memcpy(currentDatumDataPointer, DatumGetPointer(datum), datumLength); /* IGNORE-BANNED */
     }
 
     datumBuffer->len += datumLengthAligned;
@@ -714,7 +715,12 @@ DatumCopy(Datum datum, bool datumTypeByValue, int datumTypeLength)
     {
         uint32 datumLength = att_addlength_datum(0, datumTypeLength, datum);
         char *datumData = palloc0(datumLength);
-        memcpy_s(datumData, datumLength, DatumGetPointer(datum), datumLength);
+
+        /*
+         * We use IGNORE-BANNED here since we don't want to limit datum size to
+         * RSIZE_MAX unnecessarily.
+         */
+        memcpy(datumData, DatumGetPointer(datum), datumLength); /* IGNORE-BANNED */
 
         datumCopy = PointerGetDatum(datumData);
     }
@@ -737,8 +743,12 @@ CopyStringInfo(StringInfo sourceString)
         targetString->data = palloc0(sourceString->len);
         targetString->len = sourceString->len;
         targetString->maxlen = sourceString->len;
-        memcpy_s(targetString->data, sourceString->len,
-                 sourceString->data, sourceString->len);
+
+        /*
+         * We use IGNORE-BANNED here since we don't want to limit string
+         * buffer size to RSIZE_MAX unnecessarily.
+         */
+        memcpy(targetString->data, sourceString->data, sourceString->len); /* IGNORE-BANNED */
     }
 
     return targetString;
@@ -10,6 +10,17 @@
 --
 -- To do that, drop stripe_first_row_number_idx and create a unique
 -- constraint with the same name to keep the code change at minimum.
+--
+-- If we have a pg_depend entry for this index, we can not drop it as
+-- the extension depends on it. Remove the pg_depend entry if it exists.
+DELETE FROM pg_depend
+WHERE classid = 'pg_am'::regclass::oid
+      AND objid IN (select oid from pg_am where amname = 'columnar')
+      AND objsubid = 0
+      AND refclassid = 'pg_class'::regclass::oid
+      AND refobjid = 'columnar.stripe_first_row_number_idx'::regclass::oid
+      AND refobjsubid = 0
+      AND deptype = 'n';
 DROP INDEX columnar.stripe_first_row_number_idx;
 ALTER TABLE columnar.stripe ADD CONSTRAINT stripe_first_row_number_idx
 UNIQUE (storage_id, first_row_number);
@@ -8,5 +8,16 @@ DROP FUNCTION citus_internal.upgrade_columnar_storage(regclass);
 DROP FUNCTION citus_internal.downgrade_columnar_storage(regclass);
 
 -- drop "first_row_number" column and the index defined on it
+--
+-- If we have a pg_depend entry for this index, we can not drop it as
+-- the extension depends on it. Remove the pg_depend entry if it exists.
+DELETE FROM pg_depend
+WHERE classid = 'pg_am'::regclass::oid
+      AND objid IN (select oid from pg_am where amname = 'columnar')
+      AND objsubid = 0
+      AND refclassid = 'pg_class'::regclass::oid
+      AND refobjid = 'columnar.stripe_first_row_number_idx'::regclass::oid
+      AND refobjsubid = 0
+      AND deptype = 'n';
 DROP INDEX columnar.stripe_first_row_number_idx;
 ALTER TABLE columnar.stripe DROP COLUMN first_row_number;
@@ -1,4 +1,14 @@
 -- columnar--10.2-3--10.2-2.sql
+--
+-- If we have a pg_depend entry for this index, we can not drop it as
+-- the extension depends on it. Remove the pg_depend entry if it exists.
+DELETE FROM pg_depend
+WHERE classid = 'pg_am'::regclass::oid
+      AND objid IN (select oid from pg_am where amname = 'columnar')
+      AND objsubid = 0
+      AND refclassid = 'pg_class'::regclass::oid
+      AND refobjid = 'columnar.stripe_first_row_number_idx'::regclass::oid
+      AND refobjsubid = 0
+      AND deptype = 'n';
 ALTER TABLE columnar.stripe DROP CONSTRAINT stripe_first_row_number_idx;
 CREATE INDEX stripe_first_row_number_idx ON columnar.stripe USING BTREE(storage_id, first_row_number);
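All three migration scripts above guard their DROP INDEX/DROP CONSTRAINT with the same DELETE FROM pg_depend. As an editor-added, read-only illustration (not part of the diff), one way to see whether such a normal-dependency row actually exists before the migration runs:

    SELECT classid::regclass, objid, refobjid::regclass, deptype
    FROM pg_depend
    WHERE classid = 'pg_am'::regclass
      AND objid IN (SELECT oid FROM pg_am WHERE amname = 'columnar')
      AND refobjid = 'columnar.stripe_first_row_number_idx'::regclass
      AND deptype = 'n';

If this returns a row, the index is recorded as a dependency of the columnar access method, which is exactly the situation the scripts clean up before dropping it.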
@@ -113,6 +113,7 @@ CleanupWriteStateMap(void *arg)
 
 ColumnarWriteState *
 columnar_init_write_state(Relation relation, TupleDesc tupdesc,
+                          Oid tupSlotRelationId,
                           SubTransactionId currentSubXid)
 {
     bool found;
@@ -176,7 +177,16 @@ columnar_init_write_state(Relation relation, TupleDesc tupdesc,
         MemoryContext oldContext = MemoryContextSwitchTo(WriteStateContext);
 
         ColumnarOptions columnarOptions = { 0 };
-        ReadColumnarOptions(relation->rd_id, &columnarOptions);
+
+        /*
+         * In case of a table rewrite, we need to fetch table options based on the
+         * relation id of the source tuple slot.
+         *
+         * For this reason, we always pass tupSlotRelationId here; which should be
+         * same as the target table if the write operation is not related to a table
+         * rewrite etc.
+         */
+        ReadColumnarOptions(tupSlotRelationId, &columnarOptions);
 
         SubXidWriteState *stackEntry = palloc0(sizeof(SubXidWriteState));
         stackEntry->writeState = ColumnarBeginWrite(relation->rd_node,
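The tupSlotRelationId plumbing above is what lets a table rewrite pick up the columnar options of the table actually being rewritten rather than those of the transient target relation. A minimal editor-added sketch of the behaviour this fixes, assuming the `alter_columnar_table_set()` UDF and the `columnar.options` view shipped with the columnar extension (table name is hypothetical):

    CREATE TABLE metrics (at timestamptz, value float8) USING columnar;
    SELECT alter_columnar_table_set('metrics', compression => 'zstd');
    VACUUM FULL metrics;  -- rewrites the table
    SELECT * FROM columnar.options WHERE relation = 'metrics'::regclass;

With the fix, the options row should still show the zstd setting after the rewrite instead of reverting to defaults.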
@@ -1223,8 +1223,15 @@ CreateDistributedTableLike(TableConversionState *con)
         newShardCount = con->shardCount;
     }
 
+    /*
+     * To get the correct column name, we use the original relation id, not the
+     * new relation id. The reason is that the cached attributes of the original
+     * and newly created tables are not the same if the original table has
+     * dropped columns (dropped columns are still present in the attribute cache)
+     * Detailed example in https://github.com/citusdata/citus/pull/6387
+     */
     char *distributionColumnName =
-        ColumnToColumnName(con->newRelationId, (Node *) newDistributionKey);
+        ColumnToColumnName(con->relationId, (Node *) newDistributionKey);
 
     Oid originalRelationId = con->relationId;
     if (con->originalDistributionKey != NULL && PartitionTable(originalRelationId))
@@ -1604,6 +1611,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
     }
     else if (ShouldSyncTableMetadata(sourceId))
     {
+        char *qualifiedTableName = quote_qualified_identifier(schemaName, sourceName);
+
         /*
          * We are converting a citus local table to a distributed/reference table,
          * so we should prevent dropping the sequence on the table. Otherwise, we'd
@@ -1612,8 +1621,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
         StringInfo command = makeStringInfo();
 
         appendStringInfo(command,
-                         "SELECT pg_catalog.worker_drop_sequence_dependency('%s');",
-                         quote_qualified_identifier(schemaName, sourceName));
+                         "SELECT pg_catalog.worker_drop_sequence_dependency(%s);",
+                         quote_literal_cstr(qualifiedTableName));
 
         SendCommandToWorkersWithMetadata(command->data);
     }
@@ -1903,11 +1912,17 @@ CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaName, char *sequ
                                             char *sourceSchemaName, char *sourceName,
                                             char *targetSchemaName, char *targetName)
 {
+    char *qualifiedSchemaName = quote_qualified_identifier(sequenceSchemaName,
+                                                           sequenceName);
+    char *qualifiedSourceName = quote_qualified_identifier(sourceSchemaName, sourceName);
+    char *qualifiedTargetName = quote_qualified_identifier(targetSchemaName, targetName);
+
     StringInfo query = makeStringInfo();
-    appendStringInfo(query, "SELECT worker_change_sequence_dependency('%s', '%s', '%s')",
-                     quote_qualified_identifier(sequenceSchemaName, sequenceName),
-                     quote_qualified_identifier(sourceSchemaName, sourceName),
-                     quote_qualified_identifier(targetSchemaName, targetName));
+    appendStringInfo(query, "SELECT worker_change_sequence_dependency(%s, %s, %s)",
+                     quote_literal_cstr(qualifiedSchemaName),
+                     quote_literal_cstr(qualifiedSourceName),
+                     quote_literal_cstr(qualifiedTargetName));
 
     return query->data;
 }
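The switch from wrapping the qualified name in single quotes inside the format string to quote_literal_cstr() matters when an identifier itself contains a single quote. An editor-added comparison of the two generated commands for a hypothetical table s."a'b" (shown as comments only, not meant to be executed):

    -- old style, '%s' around the qualified identifier: the embedded quote breaks the literal
    --   SELECT pg_catalog.worker_drop_sequence_dependency('s."a'b"');
    -- new style, quote_literal over the qualified identifier: the quote is doubled correctly
    --   SELECT pg_catalog.worker_drop_sequence_dependency('s."a''b"');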
@@ -87,7 +87,7 @@ static List * ReversedOidList(List *oidList);
 static void AppendExplicitIndexIdsToList(Form_pg_index indexForm,
                                          List **explicitIndexIdList,
                                          int flags);
-static void DropDefaultExpressionsAndMoveOwnedSequenceOwnerships(Oid sourceRelationId,
+static void DropNextValExprsAndMoveOwnedSeqOwnerships(Oid sourceRelationId,
                                                       Oid targetRelationId);
 static void DropDefaultColumnDefinition(Oid relationId, char *columnName);
 static void TransferSequenceOwnership(Oid ownedSequenceId, Oid targetRelationId,
@@ -128,6 +128,9 @@ citus_add_local_table_to_metadata_internal(Oid relationId, bool cascadeViaForeig
 {
     CheckCitusVersion(ERROR);
 
+    /* enable citus_add_local_table_to_metadata on an empty node */
+    InsertCoordinatorIfClusterEmpty();
+
     bool autoConverted = false;
     CreateCitusLocalTable(relationId, cascadeViaForeignKeys, autoConverted);
 }
@@ -363,10 +366,10 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
 
     /*
      * Move sequence ownerships from shard table to shell table and also drop
-     * DEFAULT expressions from shard relation as we should evaluate such columns
-     * in shell table when needed.
+     * DEFAULT expressions based on sequences from shard relation as we should
+     * evaluate such columns in shell table when needed.
      */
-    DropDefaultExpressionsAndMoveOwnedSequenceOwnerships(shardRelationId,
+    DropNextValExprsAndMoveOwnedSeqOwnerships(shardRelationId,
                                               shellRelationId);
 
     InsertMetadataForCitusLocalTable(shellRelationId, shardId, autoConverted);
@@ -1158,13 +1161,14 @@ GetRenameStatsCommandList(List *statsOidList, uint64 shardId)
 
 
 /*
- * DropDefaultExpressionsAndMoveOwnedSequenceOwnerships drops default column
- * definitions for relation with sourceRelationId. Also, for each column that
- * defaults to an owned sequence, it grants ownership to the same named column
- * of the relation with targetRelationId.
+ * DropNextValExprsAndMoveOwnedSeqOwnerships drops default column definitions
+ * that are based on sequences for relation with sourceRelationId.
+ *
+ * Also, for each such column that owns a sequence, it grants ownership to the
+ * same named column of the relation with targetRelationId.
  */
 static void
-DropDefaultExpressionsAndMoveOwnedSequenceOwnerships(Oid sourceRelationId,
+DropNextValExprsAndMoveOwnedSeqOwnerships(Oid sourceRelationId,
                                           Oid targetRelationId)
 {
     List *columnNameList = NIL;
@@ -1175,10 +1179,29 @@ DropNextValExprsAndMoveOwnedSeqOwnerships(Oid sourceRelationId,
     char *columnName = NULL;
     Oid ownedSequenceId = InvalidOid;
     forboth_ptr_oid(columnName, columnNameList, ownedSequenceId, ownedSequenceIdList)
+    {
+        /*
+         * We drop nextval() expressions because Citus currently evaluates
+         * nextval() on the shell table, not on the shards. Hence, there is
+         * no reason for keeping nextval(). Also, distributed/reference table
+         * shards do not have - so be consistent with those.
+         *
+         * Note that we keep other kind of DEFAULT expressions on shards
+         * because we still want to be able to evaluate DEFAULT expressions
+         * that are not based on sequences on shards, e.g., for foreign key
+         * - SET DEFAULT actions.
+         */
+        AttrNumber columnAttrNumber = get_attnum(sourceRelationId, columnName);
+        if (ColumnDefaultsToNextVal(sourceRelationId, columnAttrNumber))
         {
             DropDefaultColumnDefinition(sourceRelationId, columnName);
+        }
 
-        /* column might not own a sequence */
+        /*
+         * Column might own a sequence without having a nextval() expr on it
+         * --e.g., due to ALTER SEQUENCE OWNED BY .. --, so check if that is
+         * the case even if the column doesn't have a DEFAULT.
+         */
         if (OidIsValid(ownedSequenceId))
         {
             TransferSequenceOwnership(ownedSequenceId, targetRelationId, columnName);
@@ -382,7 +382,6 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName,
                         "citus.shard_replication_factor > 1")));
     }
 
-    EnsureCoordinatorIsInMetadata();
     EnsureCitusTableCanBeCreated(relationId);
 
     EnsureValidDistributionColumn(relationId, distributionColumnName);
@@ -528,6 +527,14 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName,
         colocatedTableId = ColocatedTableId(colocationId);
     }
 
+    List *workerNodeList = DistributedTablePlacementNodeList(NoLock);
+    if (workerNodeList == NIL)
+    {
+        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                        errmsg("no worker nodes are available for placing shards"),
+                        errhint("Add more worker nodes.")));
+    }
+
     List *workersForPlacementList;
     List *shardSplitPointsList;
 
@@ -555,7 +562,6 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName,
         /*
         * Place shards in a round-robin fashion across all data nodes.
         */
-        List *workerNodeList = DistributedTablePlacementNodeList(NoLock);
         workersForPlacementList = RoundRobinWorkerNodeList(workerNodeList, shardCount);
     }
 
@@ -856,6 +862,8 @@ WorkerNodesForShardList(List *shardList)
 static List *
 RoundRobinWorkerNodeList(List *workerNodeList, int listLength)
 {
+    Assert(workerNodeList != NIL);
+
     List *nodeIdList = NIL;
 
     for (int idx = 0; idx < listLength; idx++)
@@ -23,12 +23,14 @@
 #include "catalog/pg_type.h"
 #include "distributed/colocation_utils.h"
 #include "distributed/commands.h"
+#include "distributed/commands/sequence.h"
 #include "distributed/coordinator_protocol.h"
 #include "distributed/listutils.h"
 #include "distributed/coordinator_protocol.h"
 #include "distributed/multi_join_order.h"
 #include "distributed/namespace_utils.h"
 #include "distributed/reference_table_utils.h"
+#include "distributed/utils/array_type.h"
 #include "distributed/version_compat.h"
 #include "miscadmin.h"
 #include "utils/builtins.h"
@@ -57,6 +59,8 @@ typedef bool (*CheckRelationFunc)(Oid);
 /* Local functions forward declarations */
 static void EnsureReferencingTableNotReplicated(Oid referencingTableId);
 static void EnsureSupportedFKeyOnDistKey(Form_pg_constraint constraintForm);
+static bool ForeignKeySetsNextValColumnToDefault(HeapTuple pgConstraintTuple);
+static List * ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple);
 static void EnsureSupportedFKeyBetweenCitusLocalAndRefTable(Form_pg_constraint
                                                             constraintForm,
                                                             char
@@ -256,6 +260,23 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
             referencedReplicationModel = referencingReplicationModel;
         }
 
+        /*
+         * Given that we drop DEFAULT nextval('sequence') expressions from
+         * shard relation columns, allowing ON DELETE/UPDATE SET DEFAULT
+         * on such columns causes inserting NULL values to referencing relation
+         * as a result of a delete/update operation on referenced relation.
+         *
+         * For this reason, we disallow ON DELETE/UPDATE SET DEFAULT actions
+         * on columns that default to sequences.
+         */
+        if (ForeignKeySetsNextValColumnToDefault(heapTuple))
+        {
+            ereport(ERROR, (errmsg("cannot create foreign key constraint "
+                                   "since Citus does not support ON DELETE "
+                                   "/ UPDATE SET DEFAULT actions on the "
+                                   "columns that default to sequences")));
+        }
+
         bool referencingIsCitusLocalOrRefTable =
             (referencingDistMethod == DISTRIBUTE_BY_NONE);
         bool referencedIsCitusLocalOrRefTable =
@@ -358,6 +379,104 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
 }
 
 
+/*
+ * ForeignKeySetsNextValColumnToDefault returns true if at least one of the
+ * columns specified in ON DELETE / UPDATE SET DEFAULT clauses defaults to
+ * nextval().
+ */
+static bool
+ForeignKeySetsNextValColumnToDefault(HeapTuple pgConstraintTuple)
+{
+    Form_pg_constraint pgConstraintForm =
+        (Form_pg_constraint) GETSTRUCT(pgConstraintTuple);
+
+    List *setDefaultAttrs = ForeignKeyGetDefaultingAttrs(pgConstraintTuple);
+    AttrNumber setDefaultAttr = InvalidAttrNumber;
+    foreach_int(setDefaultAttr, setDefaultAttrs)
+    {
+        if (ColumnDefaultsToNextVal(pgConstraintForm->conrelid, setDefaultAttr))
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+
+/*
+ * ForeignKeyGetDefaultingAttrs returns a list of AttrNumbers that
+ * might be set to default ON DELETE or ON UPDATE.
+ *
+ * For example; if the foreign key has SET DEFAULT clause for
+ * both actions, then returns a superset of the attributes that
+ * might be set to DEFAULT on either of those actions.
+ */
+static List *
+ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
+{
+    bool isNull = false;
+    Datum referencingColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
+                                                    Anum_pg_constraint_conkey, &isNull);
+    if (isNull)
+    {
+        ereport(ERROR, (errmsg("got NULL conkey from catalog")));
+    }
+
+    List *referencingColumns =
+        IntegerArrayTypeToList(DatumGetArrayTypeP(referencingColumnsDatum));
+
+    Form_pg_constraint pgConstraintForm =
+        (Form_pg_constraint) GETSTRUCT(pgConstraintTuple);
+    if (pgConstraintForm->confupdtype == FKCONSTR_ACTION_SETDEFAULT)
+    {
+        /*
+         * Postgres doesn't allow specifying SET DEFAULT for a subset of
+         * the referencing columns for ON UPDATE action, so in that case
+         * we return all referencing columns regardless of what ON DELETE
+         * action says.
+         */
+        return referencingColumns;
+    }
+
+    if (pgConstraintForm->confdeltype != FKCONSTR_ACTION_SETDEFAULT)
+    {
+        return NIL;
+    }
+
+    List *onDeleteSetDefColumnList = NIL;
+#if PG_VERSION_NUM >= PG_VERSION_15
+    Datum onDeleteSetDefColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
+                                                       Anum_pg_constraint_confdelsetcols,
+                                                       &isNull);
+
+    /*
+     * confdelsetcols being NULL means that "ON DELETE SET DEFAULT" doesn't
+     * specify which subset of columns should be set to DEFAULT, so fetching
+     * NULL from the catalog is also possible.
+     */
+    if (!isNull)
+    {
+        onDeleteSetDefColumnList =
+            IntegerArrayTypeToList(DatumGetArrayTypeP(onDeleteSetDefColumnsDatum));
+    }
+#endif
+
+    if (list_length(onDeleteSetDefColumnList) == 0)
+    {
+        /*
+         * That means that all referencing columns need to be set to
+         * DEFAULT.
+         */
+        return referencingColumns;
+    }
+    else
+    {
+        return onDeleteSetDefColumnList;
+    }
+}
+
+
 /*
  * EnsureSupportedFKeyBetweenCitusLocalAndRefTable is a helper function that
  * takes a foreign key constraint form for a foreign key between two citus
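The new check above rejects foreign keys whose SET DEFAULT action targets a column that defaults to a sequence. An editor-added, hypothetical schema of the shape that is now refused on Citus tables (names are illustrative):

    CREATE TABLE items (item_id bigserial PRIMARY KEY);
    CREATE TABLE events (
        event_id bigserial PRIMARY KEY,
        item_id bigint DEFAULT nextval('items_item_id_seq')
            REFERENCES items (item_id) ON DELETE SET DEFAULT
    );

Because Citus drops nextval() defaults from shard relations, an ON DELETE on items would otherwise write NULL into events.item_id on the shards, so the constraint is refused with the error added in this hunk.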
@@ -354,17 +354,23 @@ ExtractEncryptedPassword(Oid roleOid)
 
     Datum passwordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword,
                                        pgAuthIdDescription, &isNull);
-    char *passwordCstring = TextDatumGetCString(passwordDatum);
+
+    /*
+     * In PG, an empty password is treated the same as NULL.
+     * So we propagate NULL password to the other nodes, even if
+     * the user supplied an empty password
+     */
+
+    char *passwordCstring = NULL;
+    if (!isNull)
+    {
+        passwordCstring = pstrdup(TextDatumGetCString(passwordDatum));
+    }
 
     table_close(pgAuthId, AccessShareLock);
     ReleaseSysCache(tuple);
 
-    if (isNull)
-    {
-        return NULL;
-    }
-
-    return pstrdup(passwordCstring);
+    return passwordCstring;
 }
 
 
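The rewrite above leans on PostgreSQL treating an empty password the same as no password. A short editor-added illustration of that behaviour (the role name is hypothetical):

    CREATE ROLE empty_pw_role LOGIN PASSWORD '';
    SELECT rolname, rolpassword IS NULL AS password_is_null
    FROM pg_authid
    WHERE rolname = 'empty_pw_role';   -- password_is_null is expected to be true

Since rolpassword ends up NULL, ExtractEncryptedPassword() now returns NULL instead of calling TextDatumGetCString() on a null datum, which is what used to crash.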
@@ -27,6 +27,7 @@
 #include "nodes/makefuncs.h"
 #include "distributed/worker_create_or_replace.h"
 #include "nodes/parsenodes.h"
+#include "rewrite/rewriteHandler.h"
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
 
@@ -213,6 +214,29 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,
 }
 
 
+/*
+ * ColumnDefaultsToNextVal returns true if the column with attrNumber
+ * has a default expression that contains nextval().
+ */
+bool
+ColumnDefaultsToNextVal(Oid relationId, AttrNumber attrNumber)
+{
+    AssertArg(AttributeNumberIsValid(attrNumber));
+
+    Relation relation = RelationIdGetRelation(relationId);
+    Node *defExpr = build_column_default(relation, attrNumber);
+    RelationClose(relation);
+
+    if (defExpr == NULL)
+    {
+        /* column doesn't have a DEFAULT expression */
+        return false;
+    }
+
+    return contain_nextval_expression_walker(defExpr, NULL);
+}
+
+
 /*
  * PreprocessDropSequenceStmt gets called during the planning phase of a DROP SEQUENCE statement
  * and returns a list of DDLJob's that will drop any distributed sequences from the
@@ -43,6 +43,7 @@
 
 
 /* local function forward declarations */
+static char * GetAlterTriggerStateCommand(Oid triggerId);
 static bool IsCreateCitusTruncateTriggerStmt(CreateTrigStmt *createTriggerStmt);
 static String * GetAlterTriggerDependsTriggerNameValue(AlterObjectDependsStmt *
                                                         alterTriggerDependsStmt);
@@ -99,6 +100,18 @@ GetExplicitTriggerCommandList(Oid relationId)
         createTriggerCommandList = lappend(
             createTriggerCommandList,
             makeTableDDLCommandString(createTriggerCommand));
+
+        /*
+         * Appends the commands for the trigger settings that are not covered
+         * by CREATE TRIGGER command, such as ALTER TABLE ENABLE/DISABLE <trigger>.
+         */
+        char *alterTriggerStateCommand =
+            GetAlterTriggerStateCommand(triggerId);
+
+        createTriggerCommandList = lappend(
+            createTriggerCommandList,
+            makeTableDDLCommandString(alterTriggerStateCommand));
     }
 
     /* revert back to original search_path */
@@ -108,6 +121,72 @@ GetExplicitTriggerCommandList(Oid relationId)
 }
 
 
+/*
+ * GetAlterTriggerStateCommand returns the DDL command to set enable/disable
+ * state for given trigger. Throws an error if no such trigger exists.
+ */
+static char *
+GetAlterTriggerStateCommand(Oid triggerId)
+{
+    StringInfo alterTriggerStateCommand = makeStringInfo();
+
+    bool missingOk = false;
+    HeapTuple triggerTuple = GetTriggerTupleById(triggerId, missingOk);
+
+    Form_pg_trigger triggerForm = (Form_pg_trigger) GETSTRUCT(triggerTuple);
+
+    char *qualifiedRelName = generate_qualified_relation_name(triggerForm->tgrelid);
+    const char *quotedTrigName = quote_identifier(NameStr(triggerForm->tgname));
+    char enableDisableState = triggerForm->tgenabled;
+
+    const char *alterTriggerStateStr = NULL;
+    switch (enableDisableState)
+    {
+        case TRIGGER_FIRES_ON_ORIGIN:
+        {
+            /* default mode */
+            alterTriggerStateStr = "ENABLE";
+            break;
+        }
+
+        case TRIGGER_FIRES_ALWAYS:
+        {
+            alterTriggerStateStr = "ENABLE ALWAYS";
+            break;
+        }
+
+        case TRIGGER_FIRES_ON_REPLICA:
+        {
+            alterTriggerStateStr = "ENABLE REPLICA";
+            break;
+        }
+
+        case TRIGGER_DISABLED:
+        {
+            alterTriggerStateStr = "DISABLE";
+            break;
+        }
+
+        default:
+        {
+            elog(ERROR, "unexpected trigger state");
+        }
+    }
+
+    appendStringInfo(alterTriggerStateCommand, "ALTER TABLE %s %s TRIGGER %s;",
+                     qualifiedRelName, alterTriggerStateStr, quotedTrigName);
+
+    /*
+     * Free triggerTuple at the end since quote_identifier() might not return
+     * a palloc'd string if given identifier doesn't need to be quoted, and in
+     * that case quotedTrigName would still be bound to triggerTuple.
+     */
+    heap_freetuple(triggerTuple);
+
+    return alterTriggerStateCommand->data;
+}
+
+
 /*
  * GetTriggerTupleById returns copy of the heap tuple from pg_trigger for
  * the trigger with triggerId. If no such trigger exists, this function returns
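GetAlterTriggerStateCommand() reproduces a trigger's firing mode on shards by emitting plain DDL. As an editor-added illustration, for a hypothetical trigger audit_trg on public.orders the four commands it can build are:

    ALTER TABLE public.orders ENABLE TRIGGER audit_trg;
    ALTER TABLE public.orders ENABLE ALWAYS TRIGGER audit_trg;
    ALTER TABLE public.orders ENABLE REPLICA TRIGGER audit_trg;
    ALTER TABLE public.orders DISABLE TRIGGER audit_trg;

Appending the matching command after CREATE TRIGGER is what keeps a disabled or replica-only trigger in the same state when it is re-created on a shard.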
@@ -16,7 +16,7 @@
 #include "miscadmin.h"
 
 #include "safe_lib.h"
+#include "postmaster/postmaster.h"
 #include "access/hash.h"
 #include "commands/dbcommands.h"
 #include "distributed/backend_data.h"
@@ -63,7 +63,6 @@ static void FreeConnParamsHashEntryFields(ConnParamsHashEntry *entry);
 static void AfterXactHostConnectionHandling(ConnectionHashEntry *entry, bool isCommit);
 static bool ShouldShutdownConnection(MultiConnection *connection, const int
                                      cachedConnectionCount);
-static void ResetConnection(MultiConnection *connection);
 static bool RemoteTransactionIdle(MultiConnection *connection);
 static int EventSetSizeForConnectionList(List *connections);
 
@@ -244,6 +243,23 @@ GetNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
 }
 
 
+/*
+ * GetConnectionForLocalQueriesOutsideTransaction returns a localhost connection for
+ * subtransaction. To avoid creating excessive connections, we reuse an
+ * existing connection.
+ */
+MultiConnection *
+GetConnectionForLocalQueriesOutsideTransaction(char *userName)
+{
+    int connectionFlag = OUTSIDE_TRANSACTION;
+    MultiConnection *connection =
+        GetNodeUserDatabaseConnection(connectionFlag, LocalHostName, PostPortNumber,
+                                      userName, get_database_name(MyDatabaseId));
+
+    return connection;
+}
+
+
 /*
  * StartNodeUserDatabaseConnection() initiates a connection to a remote node.
  *
@@ -688,8 +704,8 @@ CloseConnection(MultiConnection *connection)
         dlist_delete(&connection->connectionNode);
 
         /* same for transaction state and shard/placement machinery */
-        CloseRemoteTransaction(connection);
         CloseShardPlacementAssociation(connection);
+        ResetRemoteTransaction(connection);
 
         /* we leave the per-host entry alive */
         pfree(connection);
@@ -1443,7 +1459,10 @@ AfterXactHostConnectionHandling(ConnectionHashEntry *entry, bool isCommit)
             /*
              * reset healthy session lifespan connections.
              */
-            ResetConnection(connection);
+            ResetRemoteTransaction(connection);
+
+            UnclaimConnection(connection);
+
 
             cachedConnectionCount++;
         }
@@ -1482,24 +1501,6 @@ ShouldShutdownConnection(MultiConnection *connection, const int cachedConnection
 }
 
 
-/*
- * ResetConnection preserves the given connection for later usage by
- * resetting its states.
- */
-static void
-ResetConnection(MultiConnection *connection)
-{
-    /* reset per-transaction state */
-    ResetRemoteTransaction(connection);
-    ResetShardPlacementAssociation(connection);
-
-    /* reset copy state */
-    connection->copyBytesWrittenSinceLastFlush = 0;
-
-    UnclaimConnection(connection);
-}
-
-
 /*
  * RemoteTransactionIdle function returns true if we manually
  * set flag on run_commands_on_session_level_connection_to_node to true to
@ -4022,7 +4022,7 @@ CancelTasksForJob(int64 jobid)
     }

     /* make sure the current user has the rights to cancel this task */
-    Oid taskOwner = DatumGetObjectId(values[Anum_pg_dist_background_task_owner]);
+    Oid taskOwner = DatumGetObjectId(values[Anum_pg_dist_background_task_owner - 1]);
     if (superuser_arg(taskOwner) && !superuser())
     {
         /* must be a superuser to cancel tasks owned by superuser */

@ -303,21 +303,40 @@ DropOrphanedShardsForCleanup()
                                            workerNode->workerName,
                                            workerNode->workerPort))
         {
+            if (record->policy == CLEANUP_DEFERRED_ON_SUCCESS)
+            {
+                ereport(LOG, (errmsg("deferred drop of orphaned shard %s on %s:%d "
+                                     "completed",
+                                     qualifiedTableName,
+                                     workerNode->workerName, workerNode->workerPort)));
+            }
+            else
+            {
+                ereport(LOG, (errmsg("cleaned up orphaned shard %s on %s:%d which "
+                                     "was left behind after a failed operation",
+                                     qualifiedTableName,
+                                     workerNode->workerName, workerNode->workerPort)));
+            }
+
             /* delete the cleanup record */
             DeleteCleanupRecordByRecordId(record->recordId);
             removedShardCountForCleanup++;
         }
         else
         {
+            /*
+             * We log failures at the end, since they occur repeatedly
+             * for a large number of objects.
+             */
             failedShardCountForCleanup++;
         }
     }

     if (failedShardCountForCleanup > 0)
     {
-        ereport(WARNING, (errmsg("Failed to cleanup %d shards out of %d",
-                                 failedShardCountForCleanup, list_length(
-                                     cleanupRecordList))));
+        ereport(WARNING, (errmsg("failed to clean up %d orphaned shards out of %d",
+                                 failedShardCountForCleanup,
+                                 list_length(cleanupRecordList))));
     }

     return removedShardCountForCleanup;

@ -396,19 +415,29 @@ DropOrphanedShardsForMove(bool waitForLocks)
                                            shardPlacement->nodeName,
                                            shardPlacement->nodePort))
         {
+            ereport(LOG, (errmsg("deferred drop of orphaned shard %s on %s:%d "
+                                 "after a move completed",
+                                 qualifiedTableName,
+                                 shardPlacement->nodeName,
+                                 shardPlacement->nodePort)));
+
             /* delete the actual placement */
             DeleteShardPlacementRow(placement->placementId);
             removedShardCount++;
         }
         else
         {
+            /*
+             * We log failures at the end, since they occur repeatedly
+             * for a large number of objects.
+             */
             failedShardDropCount++;
         }
     }

     if (failedShardDropCount > 0)
     {
-        ereport(WARNING, (errmsg("Failed to drop %d orphaned shards out of %d",
+        ereport(WARNING, (errmsg("failed to clean up %d orphaned shards out of %d",
                                  failedShardDropCount, list_length(shardPlacementList))));
     }

@ -436,7 +465,7 @@ RegisterOperationNeedingCleanup(void)
  * completion with failure. This will trigger cleanup of appropriate resources.
  */
 void
-FinalizeOperationNeedingCleanupOnFailure()
+FinalizeOperationNeedingCleanupOnFailure(const char *operationName)
 {
     /* We must have a valid OperationId. Any operation requring cleanup
      * will call RegisterOperationNeedingCleanup.

@ -454,7 +483,8 @@ FinalizeOperationNeedingCleanupOnFailure()
         /* We only supporting cleaning shards right now */
         if (record->objectType != CLEANUP_OBJECT_SHARD_PLACEMENT)
         {
-            ereport(WARNING, (errmsg("Invalid object type %d for cleanup record ",
+            ereport(WARNING, (errmsg(
+                                  "Invalid object type %d on failed operation cleanup",
                                   record->objectType)));
             continue;
         }

@ -473,6 +503,12 @@ FinalizeOperationNeedingCleanupOnFailure()
                                            workerNode->workerName,
                                            workerNode->workerPort))
         {
+            ereport(LOG, (errmsg("cleaned up orphaned shard %s on %s:%d after a "
+                                 "%s operation failed",
+                                 qualifiedTableName,
+                                 workerNode->workerName, workerNode->workerPort,
+                                 operationName)));
+
             /*
              * Given the operation is failing and we will abort its transaction, we cannot delete
              * records in the current transaction. Delete these records outside of the

@ -483,23 +519,22 @@ FinalizeOperationNeedingCleanupOnFailure()
         }
         else
         {
+            /*
+             * We log failures at the end, since they occur repeatedly
+             * for a large number of objects.
+             */
             failedShardCountOnComplete++;
         }
     }
     }
 }

-    if (list_length(currentOperationRecordList) > 0)
-    {
-        ereport(LOG, (errmsg("Removed %d orphaned shards out of %d",
-                             removedShardCountOnComplete, list_length(
-                                 currentOperationRecordList))));
-
     if (failedShardCountOnComplete > 0)
     {
-        ereport(WARNING, (errmsg("Failed to cleanup %d shards out of %d",
-                                 failedShardCountOnComplete, list_length(
-                                     currentOperationRecordList))));
-    }
+        ereport(WARNING, (errmsg("failed to clean up %d orphaned shards out of %d after "
+                                 "a %s operation failed",
+                                 failedShardCountOnComplete,
+                                 list_length(currentOperationRecordList),
+                                 operationName)));
     }
 }

@ -509,7 +544,7 @@ FinalizeOperationNeedingCleanupOnFailure()
  * completion with success. This will trigger cleanup of appropriate resources.
  */
 void
-FinalizeOperationNeedingCleanupOnSuccess()
+FinalizeOperationNeedingCleanupOnSuccess(const char *operationName)
 {
     /* We must have a valid OperationId. Any operation requring cleanup
      * will call RegisterOperationNeedingCleanup.

@ -527,7 +562,8 @@ FinalizeOperationNeedingCleanupOnSuccess()
         /* We only supporting cleaning shards right now */
         if (record->objectType != CLEANUP_OBJECT_SHARD_PLACEMENT)
         {
-            ereport(WARNING, (errmsg("Invalid object type %d for cleanup record ",
+            ereport(WARNING, (errmsg(
+                                  "Invalid object type %d on operation cleanup",
                                   record->objectType)));
             continue;
         }

@ -546,6 +582,12 @@ FinalizeOperationNeedingCleanupOnSuccess()
                                            workerNode->workerName,
                                            workerNode->workerPort))
         {
+            ereport(LOG, (errmsg("cleaned up orphaned shard %s on %s:%d after a "
+                                 "%s operation completed",
+                                 qualifiedTableName,
+                                 workerNode->workerName, workerNode->workerPort,
+                                 operationName)));
+
             /*
              * Delete cleanup records outside transaction as:
              * The resources are marked as 'CLEANUP_ALWAYS' and should be cleaned no matter

@ -556,6 +598,10 @@ FinalizeOperationNeedingCleanupOnSuccess()
         }
         else
         {
+            /*
+             * We log failures at the end, since they occur repeatedly
+             * for a large number of objects.
+             */
             failedShardCountOnComplete++;
         }
     }

@ -570,18 +616,14 @@ FinalizeOperationNeedingCleanupOnSuccess()
         }
     }

-    if (list_length(currentOperationRecordList) > 0)
-    {
-        ereport(LOG, (errmsg("Removed %d orphaned shards out of %d",
-                             removedShardCountOnComplete, list_length(
-                                 currentOperationRecordList))));
-
     if (failedShardCountOnComplete > 0)
     {
-        ereport(WARNING, (errmsg("Failed to cleanup %d shards out of %d",
-                                 failedShardCountOnComplete, list_length(
-                                     currentOperationRecordList))));
-    }
+        ereport(WARNING, (errmsg(
+                              "failed to clean up %d orphaned shards out of %d after "
+                              "a %s operation completed",
+                              failedShardCountOnComplete,
+                              list_length(currentOperationRecordList),
+                              operationName)));
     }
 }

@ -670,9 +712,9 @@ InsertCleanupRecordInSubtransaction(CleanupObject objectType,
                      nodeGroupId,
                      policy);

-    SendCommandListToWorkerOutsideTransaction(LocalHostName,
-                                              PostPortNumber,
-                                              CitusExtensionOwnerName(),
+    MultiConnection *connection =
+        GetConnectionForLocalQueriesOutsideTransaction(CitusExtensionOwnerName());
+    SendCommandListToWorkerOutsideTransactionWithConnection(connection,
                                               list_make1(command->data));
 }

@ -691,9 +733,9 @@ DeleteCleanupRecordByRecordIdOutsideTransaction(uint64 recordId)
                      PG_DIST_CLEANUP,
                      recordId);

-    SendCommandListToWorkerOutsideTransaction(LocalHostName,
-                                              PostPortNumber,
-                                              CitusExtensionOwnerName(),
+    MultiConnection *connection = GetConnectionForLocalQueriesOutsideTransaction(
+        CitusExtensionOwnerName());
+    SendCommandListToWorkerOutsideTransactionWithConnection(connection,
                                               list_make1(command->data));
 }

@ -727,18 +769,11 @@ TryLockRelationAndPlacementCleanup(Oid relationId, LOCKMODE lockmode)
  * true on success.
  */
 static bool
-TryDropShardOutsideTransaction(OperationId operationId, char *qualifiedTableName,
-                               char *nodeName, int nodePort)
+TryDropShardOutsideTransaction(OperationId operationId,
+                               char *qualifiedTableName,
+                               char *nodeName,
+                               int nodePort)
 {
-    char *operation = (operationId == INVALID_OPERATION_ID) ? "move" : "cleanup";
-
-    ereport(LOG, (errmsg("cleaning up %s on %s:%d which was left "
-                         "after a %s",
-                         qualifiedTableName,
-                         nodeName,
-                         nodePort,
-                         operation)));
-
     /* prepare sql query to execute to drop the shard */
     StringInfo dropQuery = makeStringInfo();
     appendStringInfo(dropQuery, DROP_REGULAR_TABLE_COMMAND, qualifiedTableName);

@ -756,9 +791,13 @@ TryDropShardOutsideTransaction(OperationId operationId, char *qualifiedTableName
                               dropQuery->data);

     /* remove the shard from the node */
-    bool success = SendOptionalCommandListToWorkerOutsideTransaction(nodeName,
-                                                                     nodePort,
-                                                                     NULL,
+    int connectionFlags = OUTSIDE_TRANSACTION;
+    MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags,
+                                                                      nodeName, nodePort,
+                                                                      CurrentUserName(),
+                                                                      NULL);
+    bool success = SendOptionalCommandListToWorkerOutsideTransactionWithConnection(
+        workerConnection,
                                                                      dropCommandList);

     return success;

@ -800,13 +839,8 @@ GetNextOperationId()
     appendStringInfo(nextValueCommand, "SELECT nextval(%s);",
                      quote_literal_cstr(sequenceName->data));

-    int connectionFlag = FORCE_NEW_CONNECTION;
-    MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlag,
-                                                                LocalHostName,
-                                                                PostPortNumber,
-                                                                CitusExtensionOwnerName(),
-                                                                get_database_name(
-                                                                    MyDatabaseId));
+    MultiConnection *connection = GetConnectionForLocalQueriesOutsideTransaction(
+        CitusExtensionOwnerName());

     PGresult *result = NULL;
     int queryResult = ExecuteOptionalRemoteCommand(connection, nextValueCommand->data,

@ -821,7 +855,6 @@ GetNextOperationId()

     PQclear(result);
     ForgetResults(connection);
-    CloseConnection(connection);

     return operationdId;
 }

@ -151,7 +151,7 @@ static List * ExecuteSplitShardReplicationSetupUDF(WorkerNode *sourceWorkerNode,
                                                    List *destinationWorkerNodesList,
                                                    DistributionColumnMap *
                                                    distributionColumnOverrides);
-static void ExecuteSplitShardReleaseSharedMemory(WorkerNode *sourceWorkerNode);
+static void ExecuteSplitShardReleaseSharedMemory(MultiConnection *sourceConnection);
 static void AddDummyShardEntryInMap(HTAB *mapOfPlacementToDummyShardList, uint32
                                     targetNodeId,
                                     ShardInterval *shardInterval);

@ -169,6 +169,12 @@ static const char *const SplitOperationName[] =
     [ISOLATE_TENANT_TO_NEW_SHARD] = "isolate",
     [CREATE_DISTRIBUTED_TABLE] = "create"
 };
+static const char *const SplitOperationAPIName[] =
+{
+    [SHARD_SPLIT_API] = "citus_split_shard_by_split_points",
+    [ISOLATE_TENANT_TO_NEW_SHARD] = "isolate_tenant_to_new_shard",
+    [CREATE_DISTRIBUTED_TABLE] = "create_distributed_table_concurrently"
+};
 static const char *const SplitTargetName[] =
 {
     [SHARD_SPLIT_API] = "shard",

@ -469,6 +475,8 @@ SplitShard(SplitMode splitMode,
            List *colocatedShardIntervalList,
            uint32 targetColocationId)
 {
+    const char *operationName = SplitOperationAPIName[splitOperation];
+
     ErrorIfModificationAndSplitInTheSameTransaction(splitOperation);

     ShardInterval *shardIntervalToSplit = LoadShardInterval(shardIdToSplit);

@ -526,6 +534,8 @@ SplitShard(SplitMode splitMode,

     if (splitMode == BLOCKING_SPLIT)
     {
+        ereport(LOG, (errmsg("performing blocking %s ", operationName)));
+
         BlockingShardSplit(
             splitOperation,
             splitWorkflowId,

@ -536,6 +546,8 @@ SplitShard(SplitMode splitMode,
     }
     else
     {
+        ereport(LOG, (errmsg("performing non-blocking %s ", operationName)));
+
         NonBlockingShardSplit(
             splitOperation,
             splitWorkflowId,

@ -548,7 +560,10 @@ SplitShard(SplitMode splitMode,
         PlacementMovedUsingLogicalReplicationInTX = true;
     }

-    FinalizeOperationNeedingCleanupOnSuccess();
+    /*
+     * Drop temporary objects that were marked as CLEANUP_ALWAYS.
+     */
+    FinalizeOperationNeedingCleanupOnSuccess(operationName);
 }


@ -569,6 +584,8 @@ BlockingShardSplit(SplitOperation splitOperation,
                    List *workersForPlacementList,
                    DistributionColumnMap *distributionColumnOverrides)
 {
+    const char *operationName = SplitOperationAPIName[splitOperation];
+
     BlockWritesToShardList(sourceColocatedShardIntervalList);

     /* First create shard interval metadata for split children */

@ -583,10 +600,14 @@ BlockingShardSplit(SplitOperation splitOperation,

     PG_TRY();
     {
+        ereport(LOG, (errmsg("creating child shards for %s", operationName)));
+
         /* Physically create split children. */
         CreateSplitShardsForShardGroup(shardGroupSplitIntervalListList,
                                        workersForPlacementList);

+        ereport(LOG, (errmsg("performing copy for %s", operationName)));
+
         /* For Blocking split, copy isn't snapshotted */
         char *snapshotName = NULL;
         DoSplitCopy(sourceShardNode, sourceColocatedShardIntervalList,

@ -596,6 +617,10 @@ BlockingShardSplit(SplitOperation splitOperation,
         /* Used for testing */
         ConflictOnlyWithIsolationTesting();

+        ereport(LOG, (errmsg(
+                          "creating auxillary structures (indexes, stats, replicaindentities, triggers) for %s",
+                          operationName)));
+
         /* Create auxiliary structures (indexes, stats, replicaindentities, triggers) */
         CreateAuxiliaryStructuresForShardGroup(shardGroupSplitIntervalListList,
                                                workersForPlacementList,

@ -617,10 +642,16 @@ BlockingShardSplit(SplitOperation splitOperation,
      */
     if (DeferShardDeleteOnSplit)
     {
+        ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s",
+                             operationName)));
+
         InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList);
     }
     else
     {
+        ereport(LOG, (errmsg("performing cleanup of source shard(s) for %s",
+                             operationName)));
+
         DropShardList(sourceColocatedShardIntervalList);
     }

@ -635,6 +666,9 @@ BlockingShardSplit(SplitOperation splitOperation,
                                       shardGroupSplitIntervalListList,
                                       workersForPlacementList);

+    ereport(LOG, (errmsg("creating foreign key constraints (if any) for %s",
+                         operationName)));
+
     /*
      * Create foreign keys if exists after the metadata changes happening in
      * DropShardList() and InsertSplitChildrenShardMetadata() because the foreign

@ -649,7 +683,7 @@ BlockingShardSplit(SplitOperation splitOperation,
         ShutdownAllConnections();

         /* Do a best effort cleanup of shards created on workers in the above block */
-        FinalizeOperationNeedingCleanupOnFailure();
+        FinalizeOperationNeedingCleanupOnFailure(operationName);

         PG_RE_THROW();
     }

@ -670,10 +704,15 @@ CheckIfRelationWithSameNameExists(ShardInterval *shardInterval, WorkerNode *work
     AppendShardIdToName(&shardName, shardInterval->shardId);

     StringInfo checkShardExistsQuery = makeStringInfo();

+    /*
+     * We pass schemaName and shardName without quote_identifier, since
+     * they are used as strings here.
+     */
     appendStringInfo(checkShardExistsQuery,
-                     "SELECT EXISTS (SELECT FROM pg_catalog.pg_tables WHERE schemaname = '%s' AND tablename = '%s');",
-                     schemaName,
-                     shardName);
+                     "SELECT EXISTS (SELECT FROM pg_catalog.pg_tables WHERE schemaname = %s AND tablename = %s);",
+                     quote_literal_cstr(schemaName),
+                     quote_literal_cstr(shardName));

     int connectionFlags = 0;
     MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlags,

@ -691,11 +730,13 @@ CheckIfRelationWithSameNameExists(ShardInterval *shardInterval, WorkerNode *work
         ReportResultError(connection, result, ERROR);
     }

-    char *checkExists = PQgetvalue(result, 0, 0);
+    char *existsString = PQgetvalue(result, 0, 0);
+    bool tableExists = strcmp(existsString, "t") == 0;

     PQclear(result);
     ForgetResults(connection);

-    return strcmp(checkExists, "t") == 0;
+    return tableExists;
 }


@ -1015,10 +1056,12 @@ static void
 CreateObjectOnPlacement(List *objectCreationCommandList,
                         WorkerNode *workerPlacementNode)
 {
-    char *currentUser = CurrentUserName();
-    SendCommandListToWorkerOutsideTransaction(workerPlacementNode->workerName,
+    MultiConnection *connection =
+        GetNodeUserDatabaseConnection(OUTSIDE_TRANSACTION,
+                                      workerPlacementNode->workerName,
                                       workerPlacementNode->workerPort,
-                                      currentUser,
+                                      NULL, NULL);
+    SendCommandListToWorkerOutsideTransactionWithConnection(connection,
                                       objectCreationCommandList);
 }


@ -1487,6 +1530,8 @@ NonBlockingShardSplit(SplitOperation splitOperation,
                       DistributionColumnMap *distributionColumnOverrides,
                       uint32 targetColocationId)
 {
+    const char *operationName = SplitOperationAPIName[splitOperation];
+
     ErrorIfMultipleNonblockingMoveSplitInTheSameTransaction();

     char *superUser = CitusExtensionOwnerName();

@ -1529,6 +1574,9 @@ NonBlockingShardSplit(SplitOperation splitOperation,
     /* Non-Blocking shard split workflow starts here */
     PG_TRY();
     {
+        ereport(LOG, (errmsg("creating child shards for %s",
+                             operationName)));
+
         /* 1) Physically create split children. */
         CreateSplitShardsForShardGroup(shardGroupSplitIntervalListList,
                                        workersForPlacementList);

@ -1558,6 +1606,10 @@ NonBlockingShardSplit(SplitOperation splitOperation,
          */
         CreateReplicaIdentitiesForDummyShards(mapOfPlacementToDummyShardList);

+        ereport(LOG, (errmsg(
+                          "creating replication artifacts (publications, replication slots, subscriptions for %s",
+                          operationName)));
+
         /* 4) Create Publications. */
         CreatePublications(sourceConnection, publicationInfoHash);

@ -1606,11 +1658,35 @@ NonBlockingShardSplit(SplitOperation splitOperation,
                                           databaseName,
                                           logicalRepTargetList);

+        /*
+         * We have to create the primary key (or any other replica identity)
+         * before the update/delete operations that are queued will be
+         * replicated. Because if the replica identity does not exist on the
+         * target, the replication would fail.
+         *
+         * So the latest possible moment we could do this is right after the
+         * initial data COPY, but before enabling the susbcriptions. It might
+         * seem like a good idea to it after the initial data COPY, since
+         * it's generally the rule that it's cheaper to build an index at once
+         * than to create it incrementally. This general rule, is why we create
+         * all the regular indexes as late during the move as possible.
+         *
+         * But as it turns out in practice it's not as clear cut, and we saw a
+         * speed degradation in the time it takes to move shards when doing the
+         * replica identity creation after the initial COPY. So, instead we
+         * keep it before the COPY.
+         */
+        CreateReplicaIdentities(logicalRepTargetList);
+
+        ereport(LOG, (errmsg("performing copy for %s", operationName)));
+
         /* 8) Do snapshotted Copy */
         DoSplitCopy(sourceShardToCopyNode, sourceColocatedShardIntervalList,
                     shardGroupSplitIntervalListList, workersForPlacementList,
                     snapshot, distributionColumnOverrides);

+        ereport(LOG, (errmsg("replicating changes for %s", operationName)));
+
         /*
          * 9) Logically replicate all the changes and do most of the table DDL,
          * like index and foreign key creation.

@ -1631,10 +1707,16 @@ NonBlockingShardSplit(SplitOperation splitOperation,
          */
         if (DeferShardDeleteOnSplit)
         {
+            ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s",
+                                 operationName)));
+
             InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList);
         }
         else
         {
+            ereport(LOG, (errmsg("performing cleanup of source shard(s) for %s",
+                                 operationName)));
+
             DropShardList(sourceColocatedShardIntervalList);
         }

@ -1683,6 +1765,9 @@ NonBlockingShardSplit(SplitOperation splitOperation,
          */
         CreatePartitioningHierarchy(logicalRepTargetList);

+        ereport(LOG, (errmsg("creating foreign key constraints (if any) for %s",
+                             operationName)));
+
         /*
          * 14) Create foreign keys if exists after the metadata changes happening in
          * DropShardList() and InsertSplitChildrenShardMetadata() because the foreign

@ -1694,7 +1779,7 @@ NonBlockingShardSplit(SplitOperation splitOperation,
          * 15) Release shared memory allocated by worker_split_shard_replication_setup udf
          * at source node.
          */
-        ExecuteSplitShardReleaseSharedMemory(sourceShardToCopyNode);
+        ExecuteSplitShardReleaseSharedMemory(sourceConnection);

         /* 16) Close source connection */
         CloseConnection(sourceConnection);

@ -1716,7 +1801,11 @@ NonBlockingShardSplit(SplitOperation splitOperation,
          */
         DropAllLogicalReplicationLeftovers(SHARD_SPLIT);

-        FinalizeOperationNeedingCleanupOnFailure();
+        /*
+         * Drop temporary objects that were marked as CLEANUP_ON_FAILURE
+         * or CLEANUP_ALWAYS.
+         */
+        FinalizeOperationNeedingCleanupOnFailure(operationName);

         PG_RE_THROW();
     }

@ -1987,19 +2076,8 @@ ExecuteSplitShardReplicationSetupUDF(WorkerNode *sourceWorkerNode,
  * shared memory to store split information. This has to be released after split completes(or fails).
  */
 static void
-ExecuteSplitShardReleaseSharedMemory(WorkerNode *sourceWorkerNode)
+ExecuteSplitShardReleaseSharedMemory(MultiConnection *sourceConnection)
 {
-    char *superUser = CitusExtensionOwnerName();
-    char *databaseName = get_database_name(MyDatabaseId);
-
-    int connectionFlag = FORCE_NEW_CONNECTION;
-    MultiConnection *sourceConnection = GetNodeUserDatabaseConnection(
-        connectionFlag,
-        sourceWorkerNode->workerName,
-        sourceWorkerNode->workerPort,
-        superUser,
-        databaseName);
-
     StringInfo splitShardReleaseMemoryUDF = makeStringInfo();
     appendStringInfo(splitShardReleaseMemoryUDF,
                      "SELECT pg_catalog.worker_split_shard_release_dsm();");

@ -2214,14 +2292,8 @@ GetNextShardIdForSplitChild()
     appendStringInfo(nextValueCommand, "SELECT nextval(%s);", quote_literal_cstr(
                          "pg_catalog.pg_dist_shardid_seq"));

-    int connectionFlag = FORCE_NEW_CONNECTION;
-    MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlag,
-                                                                LocalHostName,
-                                                                PostPortNumber,
-                                                                CitusExtensionOwnerName(),
-                                                                get_database_name(
-                                                                    MyDatabaseId));
+    MultiConnection *connection = GetConnectionForLocalQueriesOutsideTransaction(
+        CitusExtensionOwnerName());

     PGresult *result = NULL;
     int queryResult = ExecuteOptionalRemoteCommand(connection, nextValueCommand->data,
                                                    &result);

@ -2238,7 +2310,8 @@ GetNextShardIdForSplitChild()
     }

     shardId = SafeStringToUint64(PQgetvalue(result, 0, 0 /* nodeId column*/));
-    CloseConnection(connection);
+    PQclear(result);
+    ForgetResults(connection);

     return shardId;
 }

@ -267,8 +267,11 @@ ErrorIfCoordinatorNotAddedAsWorkerNode()
         return;
     }

-    ereport(ERROR, (errmsg("could not find the coordinator node in "
-                           "metadata as it is not added as a worker")));
+    ereport(ERROR, (errmsg("operation is not allowed when coordinator "
+                           "is not added into metadata"),
+                    errhint("Use \"SELECT citus_set_coordinator_host('"
+                            "<hostname>', '<port>')\" to configure the "
+                            "coordinator hostname and port")));
 }

@ -43,6 +43,10 @@ static DestReceiver * CreatePartitionedSplitCopyDestReceiver(EState *executor,
                                                              List *splitCopyInfoList);
 static void BuildMinMaxRangeArrays(List *splitCopyInfoList, ArrayType **minValueArray,
                                    ArrayType **maxValueArray);
+static char * TraceWorkerSplitCopyUdf(char *sourceShardToCopySchemaName,
+                                      char *sourceShardToCopyPrefix,
+                                      char *sourceShardToCopyQualifiedName,
+                                      List *splitCopyInfoList);

 /*
  * worker_split_copy(source_shard_id bigint, splitCopyInfo pg_catalog.split_copy_info[])

@ -93,12 +97,18 @@ worker_split_copy(PG_FUNCTION_ARGS)
     Oid sourceShardToCopySchemaOId = get_rel_namespace(
         shardIntervalToSplitCopy->relationId);
     char *sourceShardToCopySchemaName = get_namespace_name(sourceShardToCopySchemaOId);
-    char *sourceShardToCopyName = get_rel_name(shardIntervalToSplitCopy->relationId);
+    char *sourceShardPrefix = get_rel_name(shardIntervalToSplitCopy->relationId);
+    char *sourceShardToCopyName = pstrdup(sourceShardPrefix);
     AppendShardIdToName(&sourceShardToCopyName, shardIdToSplitCopy);
     char *sourceShardToCopyQualifiedName = quote_qualified_identifier(
         sourceShardToCopySchemaName,
         sourceShardToCopyName);

+    ereport(LOG, (errmsg("%s", TraceWorkerSplitCopyUdf(sourceShardToCopySchemaName,
+                                                       sourceShardPrefix,
+                                                       sourceShardToCopyQualifiedName,
+                                                       splitCopyInfoList))));
+
     StringInfo selectShardQueryForCopy = makeStringInfo();
     appendStringInfo(selectShardQueryForCopy,
                      "SELECT * FROM %s;", sourceShardToCopyQualifiedName);

@ -113,6 +123,48 @@ worker_split_copy(PG_FUNCTION_ARGS)
 }


+/* Trace split copy udf */
+static char *
+TraceWorkerSplitCopyUdf(char *sourceShardToCopySchemaName,
+                        char *sourceShardToCopyPrefix,
+                        char *sourceShardToCopyQualifiedName,
+                        List *splitCopyInfoList)
+{
+    StringInfo splitCopyTrace = makeStringInfo();
+    appendStringInfo(splitCopyTrace, "performing copy from shard %s to [",
+                     sourceShardToCopyQualifiedName);
+
+    /* split copy always has atleast two destinations */
+    int index = 1;
+    int splitWayCount = list_length(splitCopyInfoList);
+    SplitCopyInfo *splitCopyInfo = NULL;
+    foreach_ptr(splitCopyInfo, splitCopyInfoList)
+    {
+        char *shardNameCopy = pstrdup(sourceShardToCopyPrefix);
+        AppendShardIdToName(&shardNameCopy, splitCopyInfo->destinationShardId);
+
+        char *shardNameCopyQualifiedName = quote_qualified_identifier(
+            sourceShardToCopySchemaName,
+            shardNameCopy);
+
+        appendStringInfo(splitCopyTrace, "%s (nodeId: %u)", shardNameCopyQualifiedName,
+                         splitCopyInfo->destinationShardNodeId);
+        pfree(shardNameCopy);
+
+        if (index < splitWayCount)
+        {
+            appendStringInfo(splitCopyTrace, ", ");
+        }
+
+        index++;
+    }
+
+    appendStringInfo(splitCopyTrace, "]");
+
+    return splitCopyTrace->data;
+}
+
+
 /* Parse a single SplitCopyInfo Tuple */
 static void
 ParseSplitCopyInfoDatum(Datum splitCopyInfoDatum, SplitCopyInfo **splitCopyInfo)

@ -281,7 +281,8 @@ PopulateShardSplitInfoInSM(ShardSplitInfoSMHeader *shardSplitInfoSMHeader)
     {
         uint32_t nodeId = entry->key.nodeId;
         uint32_t tableOwnerId = entry->key.tableOwnerId;
-        char *derivedSlotName = ReplicationSlotName(SHARD_SPLIT, nodeId, tableOwnerId);
+        char *derivedSlotName = ReplicationSlotNameForNodeAndOwner(SHARD_SPLIT, nodeId,
+                                                                   tableOwnerId);

         List *shardSplitInfoList = entry->shardSplitInfoList;
         ShardSplitInfo *splitShardInfo = NULL;

@ -389,7 +390,8 @@ ReturnReplicationSlotInfo(Tuplestorestate *tupleStore, TupleDesc
         char *tableOwnerName = GetUserNameFromId(entry->key.tableOwnerId, false);
         values[1] = CStringGetTextDatum(tableOwnerName);

-        char *slotName = ReplicationSlotName(SHARD_SPLIT, entry->key.nodeId,
+        char *slotName = ReplicationSlotNameForNodeAndOwner(SHARD_SPLIT,
+                                                            entry->key.nodeId,
                                                             entry->key.tableOwnerId);
         values[2] = CStringGetTextDatum(slotName);

@ -1897,14 +1897,14 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
     MemoryContext restrictionsMemoryContext = plannerRestrictionContext->memoryContext;
     MemoryContext oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext);

-    bool distributedTable = IsCitusTable(rte->relid);
+    bool isCitusTable = IsCitusTable(rte->relid);

     RelationRestriction *relationRestriction = palloc0(sizeof(RelationRestriction));
     relationRestriction->index = restrictionIndex;
     relationRestriction->relationId = rte->relid;
     relationRestriction->rte = rte;
     relationRestriction->relOptInfo = relOptInfo;
-    relationRestriction->distributedRelation = distributedTable;
+    relationRestriction->citusTable = isCitusTable;
     relationRestriction->plannerInfo = root;

     /* see comments on GetVarFromAssignedParam() */

@ -1919,10 +1919,42 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
      * We're also keeping track of whether all participant
      * tables are reference tables.
      */
-    if (distributedTable)
+    if (isCitusTable)
     {
         cacheEntry = GetCitusTableCacheEntry(rte->relid);

+        /*
+         * The statistics objects of the distributed table are not relevant
+         * for the distributed planning, so we can override it.
+         *
+         * Normally, we should not need this. However, the combination of
+         * Postgres commit 269b532aef55a579ae02a3e8e8df14101570dfd9 and
+         * Citus function AdjustPartitioningForDistributedPlanning()
+         * forces us to do this. The commit expects statistics objects
+         * of partitions to have "inh" flag set properly. Whereas, the
+         * function overrides "inh" flag. To avoid Postgres to throw error,
+         * we override statlist such that Postgres does not try to process
+         * any statistics objects during the standard_planner() on the
+         * coordinator. In the end, we do not need the standard_planner()
+         * on the coordinator to generate an optimized plan. We call
+         * into standard_planner() for other purposes, such as generating the
+         * relationRestrictionContext here.
+         *
+         * AdjustPartitioningForDistributedPlanning() is a hack that we use
+         * to prevent Postgres' standard_planner() to expand all the partitions
+         * for the distributed planning when a distributed partitioned table
+         * is queried. It is required for both correctness and performance
+         * reasons. Although we can eliminate the use of the function for
+         * the correctness (e.g., make sure that rest of the planner can handle
+         * partitions), it's performance implication is hard to avoid. Certain
+         * planning logic of Citus (such as router or query pushdown) relies
+         * heavily on the relationRestrictionList. If
+         * AdjustPartitioningForDistributedPlanning() is removed, all the
+         * partitions show up in the, causing high planning times for
+         * such queries.
+         */
+        relOptInfo->statlist = NIL;
+
         relationRestrictionContext->allReferenceTables &=
             IsCitusTableTypeCacheEntry(cacheEntry, REFERENCE_TABLE);
     }

@ -3692,7 +3692,7 @@ CopyRelationRestrictionContext(RelationRestrictionContext *oldContext)

         newRestriction->index = oldRestriction->index;
         newRestriction->relationId = oldRestriction->relationId;
-        newRestriction->distributedRelation = oldRestriction->distributedRelation;
+        newRestriction->citusTable = oldRestriction->citusTable;
         newRestriction->rte = copyObject(oldRestriction->rte);

         /* can't be copied, we copy (flatly) a RelOptInfo, and then decouple baserestrictinfo */

@ -224,7 +224,7 @@ ContextContainsLocalRelation(RelationRestrictionContext *restrictionContext)
|
||||||
{
|
{
|
||||||
RelationRestriction *relationRestriction = lfirst(relationRestrictionCell);
|
RelationRestriction *relationRestriction = lfirst(relationRestrictionCell);
|
||||||
|
|
||||||
if (!relationRestriction->distributedRelation)
|
if (!relationRestriction->citusTable)
|
||||||
{
|
{
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
|
@ -66,6 +66,7 @@
|
||||||
#include "utils/syscache.h"
|
#include "utils/syscache.h"
|
||||||
|
|
||||||
#define STR_ERRCODE_UNDEFINED_OBJECT "42704"
|
#define STR_ERRCODE_UNDEFINED_OBJECT "42704"
|
||||||
|
#define STR_ERRCODE_OBJECT_IN_USE "55006"
|
||||||
|
|
||||||
|
|
||||||
#define REPLICATION_SLOT_CATALOG_TABLE_NAME "pg_replication_slots"
|
#define REPLICATION_SLOT_CATALOG_TABLE_NAME "pg_replication_slots"
|
||||||
|
@ -156,6 +157,10 @@ static void WaitForGroupedLogicalRepTargetsToBecomeReady(
|
||||||
static void WaitForGroupedLogicalRepTargetsToCatchUp(XLogRecPtr sourcePosition,
|
static void WaitForGroupedLogicalRepTargetsToCatchUp(XLogRecPtr sourcePosition,
|
||||||
GroupedLogicalRepTargets *
|
GroupedLogicalRepTargets *
|
||||||
groupedLogicalRepTargets);
|
groupedLogicalRepTargets);
|
||||||
|
static void RecreateGroupedLogicalRepTargetsConnections(
|
||||||
|
HTAB *groupedLogicalRepTargetsHash,
|
||||||
|
char *user,
|
||||||
|
char *databaseName);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* LogicallyReplicateShards replicates a list of shards from one node to another
|
* LogicallyReplicateShards replicates a list of shards from one node to another
|
||||||
|
@ -233,6 +238,26 @@ LogicallyReplicateShards(List *shardList, char *sourceNodeName, int sourceNodePo
|
||||||
/* only useful for isolation testing, see the function comment for the details */
|
/* only useful for isolation testing, see the function comment for the details */
|
||||||
ConflictOnlyWithIsolationTesting();
|
ConflictOnlyWithIsolationTesting();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We have to create the primary key (or any other replica identity)
|
||||||
|
* before the update/delete operations that are queued will be
|
||||||
|
* replicated. Because if the replica identity does not exist on the
|
||||||
|
* target, the replication would fail.
|
||||||
|
*
|
||||||
|
* So the latest possible moment we could do this is right after the
|
||||||
|
* initial data COPY, but before enabling the susbcriptions. It might
|
||||||
|
* seem like a good idea to it after the initial data COPY, since
|
||||||
|
* it's generally the rule that it's cheaper to build an index at once
|
||||||
|
* than to create it incrementally. This general rule, is why we create
|
||||||
|
* all the regular indexes as late during the move as possible.
|
||||||
|
*
|
||||||
|
* But as it turns out in practice it's not as clear cut, and we saw a
|
||||||
|
* speed degradation in the time it takes to move shards when doing the
|
||||||
|
* replica identity creation after the initial COPY. So, instead we
|
||||||
|
* keep it before the COPY.
|
||||||
|
*/
|
||||||
|
CreateReplicaIdentities(logicalRepTargetList);
|
||||||
|
|
||||||
CopyShardsToNode(sourceNode, targetNode, shardList, snapshot);
|
CopyShardsToNode(sourceNode, targetNode, shardList, snapshot);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -346,20 +371,6 @@ CompleteNonBlockingShardTransfer(List *shardList,
|
||||||
HTAB *groupedLogicalRepTargetsHash,
|
HTAB *groupedLogicalRepTargetsHash,
|
||||||
LogicalRepType type)
|
LogicalRepType type)
|
||||||
{
|
{
|
||||||
/*
|
|
||||||
* We have to create the primary key (or any other replica identity)
|
|
||||||
* before the update/delete operations that are queued will be
|
|
||||||
* replicated. Because if the replica identity does not exist on the
|
|
||||||
* target, the replication would fail.
|
|
||||||
*
|
|
||||||
* So we it right after the initial data COPY, but before enabling the
|
|
||||||
* susbcriptions. We do it at this latest possible moment, because its
|
|
||||||
* much cheaper to build an index at once than to create it
|
|
||||||
* incrementally. So this way we create the primary key index in one go
|
|
||||||
* for all data from the initial COPY.
|
|
||||||
*/
|
|
||||||
CreateReplicaIdentities(logicalRepTargetList);
|
|
||||||
|
|
||||||
/* Start applying the changes from the replication slots to catch up. */
|
/* Start applying the changes from the replication slots to catch up. */
|
||||||
EnableSubscriptions(logicalRepTargetList);
|
EnableSubscriptions(logicalRepTargetList);
|
||||||
|
|
||||||
|
@ -490,7 +501,7 @@ CreateShardMoveLogicalRepTargetList(HTAB *publicationInfoHash, List *shardList)
|
||||||
target->newShards = NIL;
|
target->newShards = NIL;
|
||||||
target->subscriptionOwnerName = SubscriptionRoleName(SHARD_MOVE, ownerId);
|
target->subscriptionOwnerName = SubscriptionRoleName(SHARD_MOVE, ownerId);
|
||||||
target->replicationSlot = palloc0(sizeof(ReplicationSlotInfo));
|
target->replicationSlot = palloc0(sizeof(ReplicationSlotInfo));
|
||||||
target->replicationSlot->name = ReplicationSlotName(SHARD_MOVE,
|
target->replicationSlot->name = ReplicationSlotNameForNodeAndOwner(SHARD_MOVE,
|
||||||
nodeId,
|
nodeId,
|
||||||
ownerId);
|
ownerId);
|
||||||
target->replicationSlot->targetNodeId = nodeId;
|
target->replicationSlot->targetNodeId = nodeId;
|
||||||
|
@ -559,10 +570,10 @@ DropAllLogicalReplicationLeftovers(LogicalRepType type)
|
||||||
char *databaseName = get_database_name(MyDatabaseId);
|
char *databaseName = get_database_name(MyDatabaseId);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We open new connections to all nodes. The reason for this is that
|
* We need connections that are not currently inside a transaction. The
|
||||||
* operations on subscriptions, publications and replication slotscannot be
|
* reason for this is that operations on subscriptions, publications and
|
||||||
* run in a transaction. By forcing a new connection we make sure no
|
* replication slots cannot be run in a transaction. By forcing a new
|
||||||
* transaction is active on the connection.
|
* connection we make sure no transaction is active on the connection.
|
||||||
*/
|
*/
|
||||||
int connectionFlags = FORCE_NEW_CONNECTION;
|
int connectionFlags = FORCE_NEW_CONNECTION;
|
||||||
|
|
||||||
|
@ -600,7 +611,9 @@ DropAllLogicalReplicationLeftovers(LogicalRepType type)
|
||||||
/*
|
/*
|
||||||
* We close all connections that we opened for the dropping here. That
|
* We close all connections that we opened for the dropping here. That
|
||||||
* way we don't keep these connections open unnecessarily during the
|
* way we don't keep these connections open unnecessarily during the
|
||||||
* 'LogicalRepType' operation (which can take a long time).
|
* 'LogicalRepType' operation (which can take a long time). We might
|
||||||
|
* need to reopen a few later on, but that seems better than keeping
|
||||||
|
* many open for no reason for a long time.
|
||||||
*/
|
*/
|
||||||
CloseConnection(cleanupConnection);
|
CloseConnection(cleanupConnection);
|
||||||
}
|
}
|
||||||
|
@ -1150,11 +1163,14 @@ CreatePartitioningHierarchy(List *logicalRepTargetList)
|
||||||
* parallel, so create them sequentially. Also attaching partition
|
* parallel, so create them sequentially. Also attaching partition
|
||||||
* is a quick operation, so it is fine to execute sequentially.
|
* is a quick operation, so it is fine to execute sequentially.
|
||||||
*/
|
*/
|
||||||
SendCommandListToWorkerOutsideTransaction(
|
|
||||||
|
MultiConnection *connection =
|
||||||
|
GetNodeUserDatabaseConnection(OUTSIDE_TRANSACTION,
|
||||||
target->superuserConnection->hostname,
|
target->superuserConnection->hostname,
|
||||||
target->superuserConnection->port,
|
target->superuserConnection->port,
|
||||||
tableOwner,
|
tableOwner, NULL);
|
||||||
list_make1(attachPartitionCommand));
|
ExecuteCriticalRemoteCommand(connection, attachPartitionCommand);
|
||||||
|
|
||||||
MemoryContextReset(localContext);
|
MemoryContextReset(localContext);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1203,10 +1219,8 @@ CreateUncheckedForeignKeyConstraints(List *logicalRepTargetList)
|
||||||
list_make1("SET LOCAL citus.skip_constraint_validation TO ON;"),
|
list_make1("SET LOCAL citus.skip_constraint_validation TO ON;"),
|
||||||
commandList);
|
commandList);
|
||||||
|
|
||||||
SendCommandListToWorkerOutsideTransaction(
|
SendCommandListToWorkerOutsideTransactionWithConnection(
|
||||||
target->superuserConnection->hostname,
|
target->superuserConnection,
|
||||||
target->superuserConnection->port,
|
|
||||||
target->superuserConnection->user,
|
|
||||||
commandList);
|
commandList);
|
||||||
|
|
||||||
MemoryContextReset(localContext);
|
MemoryContextReset(localContext);
|
||||||
|
@@ -1281,18 +1295,64 @@ DropPublications(MultiConnection *sourceConnection, HTAB *publicationInfoHash)

 /*
  * DropReplicationSlot drops the replication slot with the given name
- * if it exists.
+ * if it exists. It retries if the command fails with an OBJECT_IN_USE error.
  */
 static void
 DropReplicationSlot(MultiConnection *connection, char *replicationSlotName)
 {
-	ExecuteCriticalRemoteCommand(
-		connection,
-		psprintf(
-			"select pg_drop_replication_slot(slot_name) from "
-			REPLICATION_SLOT_CATALOG_TABLE_NAME
-			" where slot_name = %s",
-			quote_literal_cstr(replicationSlotName)));
+	int maxSecondsToTryDropping = 20;
+	bool raiseInterrupts = true;
+	PGresult *result = NULL;
+
+	/* we'll retry in case of an OBJECT_IN_USE error */
+	while (maxSecondsToTryDropping >= 0)
+	{
+		int querySent = SendRemoteCommand(
+			connection,
+			psprintf(
+				"select pg_drop_replication_slot(slot_name) from "
+				REPLICATION_SLOT_CATALOG_TABLE_NAME
+				" where slot_name = %s",
+				quote_literal_cstr(replicationSlotName))
+			);
+
+		if (querySent == 0)
+		{
+			ReportConnectionError(connection, ERROR);
+		}
+
+		result = GetRemoteCommandResult(connection, raiseInterrupts);
+
+		if (IsResponseOK(result))
+		{
+			/* no error, we are good to go */
+			break;
+		}
+
+		char *errorcode = PQresultErrorField(result, PG_DIAG_SQLSTATE);
+		if (errorcode != NULL && strcmp(errorcode, STR_ERRCODE_OBJECT_IN_USE) == 0 &&
+			maxSecondsToTryDropping > 0)
+		{
+			/* retry dropping the replication slot after sleeping for one sec */
+			maxSecondsToTryDropping--;
+			pg_usleep(1000);
+		}
+		else
+		{
+			/*
+			 * Report error if:
+			 * - Error code is not 55006 (Object In Use)
+			 * - Or, we have made enough number of retries (currently 20), but didn't work
+			 */
+			ReportResultError(connection, result, ERROR);
+		}
+
+		PQclear(result);
+		ForgetResults(connection);
+	}
+
+	PQclear(result);
+	ForgetResults(connection);
 }
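For readers outside the Citus tree, the same retry idea can be sketched with plain libpq. This is illustrative only, not code from the commit: the connection string, slot name, and retry budget are placeholders, and error handling is reduced to the SQLSTATE check (55006, object_in_use) that the reworked `DropReplicationSlot` relies on.

```c
/* Sketch: drop a replication slot, retrying while it is still in use (SQLSTATE 55006). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn *conn = PQconnectdb("dbname=postgres");    /* placeholder connection string */
	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		return 1;
	}

	const char *dropSql =
		"SELECT pg_drop_replication_slot(slot_name) "
		"FROM pg_replication_slots WHERE slot_name = 'demo_slot'";   /* placeholder slot */

	int dropped = 0;
	for (int attempt = 0; attempt < 20 && !dropped; attempt++)
	{
		PGresult *result = PQexec(conn, dropSql);
		if (PQresultStatus(result) == PGRES_TUPLES_OK)
		{
			dropped = 1;                               /* dropped, or slot did not exist */
		}
		else
		{
			char *sqlstate = PQresultErrorField(result, PG_DIAG_SQLSTATE);
			if (sqlstate == NULL || strcmp(sqlstate, "55006") != 0)
			{
				fprintf(stderr, "giving up: %s", PQerrorMessage(conn));
				PQclear(result);
				PQfinish(conn);
				return 1;
			}
			sleep(1);                                  /* a walsender may still hold the slot */
		}
		PQclear(result);
	}

	PQfinish(conn);
	return dropped ? 0 : 1;
}
```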
@@ -1321,11 +1381,14 @@ PublicationName(LogicalRepType type, uint32_t nodeId, Oid ownerId)

 /*
- * ReplicationSlotName returns the name of the replication slot for the given
- * node and table owner.
+ * ReplicationSlotNameForNodeAndOwner returns the name of the replication slot for the
+ * given node and table owner.
+ *
+ * Note that PG15 introduced a new ReplicationSlotName function that caused name conflicts
+ * and we renamed this function.
  */
 char *
-ReplicationSlotName(LogicalRepType type, uint32_t nodeId, Oid ownerId)
+ReplicationSlotNameForNodeAndOwner(LogicalRepType type, uint32_t nodeId, Oid ownerId)
 {
	StringInfo slotName = makeStringInfo();
	appendStringInfo(slotName, "%s%u_%u", replicationSlotPrefix[type], nodeId,
@@ -1585,11 +1648,11 @@ DropUser(MultiConnection *connection, char *username)
	 * The DROP USER command should not propagate, so we temporarily disable
	 * DDL propagation.
	 */
-	SendCommandListToWorkerOutsideTransaction(
-		connection->hostname, connection->port, connection->user,
+	SendCommandListToWorkerOutsideTransactionWithConnection(
+		connection,
		list_make2(
			"SET LOCAL citus.enable_ddl_propagation TO OFF;",
-			psprintf("DROP USER IF EXISTS %s",
+			psprintf("DROP USER IF EXISTS %s;",
					 quote_identifier(username))));
 }
@@ -1771,14 +1834,12 @@ CreateSubscriptions(MultiConnection *sourceConnection,
		 * create a user with SUPERUSER permissions and then alter it to NOSUPERUSER.
		 * This prevents permission escalations.
		 */
-		SendCommandListToWorkerOutsideTransaction(
-			target->superuserConnection->hostname,
-			target->superuserConnection->port,
-			target->superuserConnection->user,
+		SendCommandListToWorkerOutsideTransactionWithConnection(
+			target->superuserConnection,
			list_make2(
				"SET LOCAL citus.enable_ddl_propagation TO OFF;",
				psprintf(
-					"CREATE USER %s SUPERUSER IN ROLE %s",
+					"CREATE USER %s SUPERUSER IN ROLE %s;",
					target->subscriptionOwnerName,
					GetUserNameFromId(ownerId, false)
					)));
@@ -1832,14 +1893,12 @@ CreateSubscriptions(MultiConnection *sourceConnection,
		 * The ALTER ROLE command should not propagate, so we temporarily
		 * disable DDL propagation.
		 */
-		SendCommandListToWorkerOutsideTransaction(
-			target->superuserConnection->hostname,
-			target->superuserConnection->port,
-			target->superuserConnection->user,
+		SendCommandListToWorkerOutsideTransactionWithConnection(
+			target->superuserConnection,
			list_make2(
				"SET LOCAL citus.enable_ddl_propagation TO OFF;",
				psprintf(
-					"ALTER ROLE %s NOSUPERUSER",
+					"ALTER ROLE %s NOSUPERUSER;",
					target->subscriptionOwnerName
					)));
	}
@@ -2001,8 +2060,12 @@ CreateGroupedLogicalRepTargetsConnections(HTAB *groupedLogicalRepTargetsHash,
  * RecreateGroupedLogicalRepTargetsConnections recreates connections for all of the
  * nodes in the groupedLogicalRepTargetsHash where the old connection is broken or
  * currently running a query.
+ *
+ * IMPORTANT: When it recreates the connection, it doesn't close the existing
+ * connection. This means that this function should only be called when we know
+ * we'll throw an error afterwards, otherwise we would leak these connections.
  */
-void
+static void
 RecreateGroupedLogicalRepTargetsConnections(HTAB *groupedLogicalRepTargetsHash,
											 char *user,
											 char *databaseName)
@@ -2012,10 +2075,11 @@ RecreateGroupedLogicalRepTargetsConnections(HTAB *groupedLogicalRepTargetsHash,
	GroupedLogicalRepTargets *groupedLogicalRepTargets = NULL;
	foreach_htab(groupedLogicalRepTargets, &status, groupedLogicalRepTargetsHash)
	{
-		if (groupedLogicalRepTargets->superuserConnection &&
-			PQstatus(groupedLogicalRepTargets->superuserConnection->pgConn) ==
-			CONNECTION_OK &&
-			!PQisBusy(groupedLogicalRepTargets->superuserConnection->pgConn)
+		MultiConnection *superuserConnection =
+			groupedLogicalRepTargets->superuserConnection;
+		if (superuserConnection &&
+			PQstatus(superuserConnection->pgConn) == CONNECTION_OK &&
+			!PQisBusy(superuserConnection->pgConn)
			)
		{
			continue;
@@ -2023,8 +2087,8 @@ RecreateGroupedLogicalRepTargetsConnections(HTAB *groupedLogicalRepTargetsHash,
		WorkerNode *targetWorkerNode = FindNodeWithNodeId(
			groupedLogicalRepTargets->nodeId,
			false);
-		MultiConnection *superuserConnection =
-			GetNodeUserDatabaseConnection(connectionFlags,
-										  targetWorkerNode->workerName,
-										  targetWorkerNode->workerPort,
-										  user,
+		superuserConnection = GetNodeUserDatabaseConnection(
+			connectionFlags,
+			targetWorkerNode->workerName,
+			targetWorkerNode->workerPort,
+			user,
@@ -34,6 +34,10 @@ static Oid FindTargetRelationOid(Relation sourceShardRelation,
								  HeapTuple tuple,
								  char *currentSlotName);

+static HeapTuple GetTupleForTargetSchema(HeapTuple sourceRelationTuple,
+										 TupleDesc sourceTupleDesc,
+										 TupleDesc targetTupleDesc);
+
 /*
  * Postgres uses 'pgoutput' as default plugin for logical replication.
  * We want to reuse Postgres pgoutput's functionality as much as possible.
@@ -129,6 +133,71 @@ split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
	}

	Relation targetRelation = RelationIdGetRelation(targetRelationOid);
+
+	/*
+	 * If any columns from source relation have been dropped, then the tuple needs to
+	 * be formatted according to the target relation.
+	 */
+	TupleDesc sourceRelationDesc = RelationGetDescr(relation);
+	TupleDesc targetRelationDesc = RelationGetDescr(targetRelation);
+	if (sourceRelationDesc->natts > targetRelationDesc->natts)
+	{
+		switch (change->action)
+		{
+			case REORDER_BUFFER_CHANGE_INSERT:
+			{
+				HeapTuple sourceRelationNewTuple = &(change->data.tp.newtuple->tuple);
+				HeapTuple targetRelationNewTuple = GetTupleForTargetSchema(
+					sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc);
+
+				change->data.tp.newtuple->tuple = *targetRelationNewTuple;
+				break;
+			}
+
+			case REORDER_BUFFER_CHANGE_UPDATE:
+			{
+				HeapTuple sourceRelationNewTuple = &(change->data.tp.newtuple->tuple);
+				HeapTuple targetRelationNewTuple = GetTupleForTargetSchema(
+					sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc);
+
+				change->data.tp.newtuple->tuple = *targetRelationNewTuple;
+
+				/*
+				 * Format oldtuple according to the target relation. If the column values of replica
+				 * identiy change, then the old tuple is non-null and needs to be formatted according
+				 * to the target relation schema.
+				 */
+				if (change->data.tp.oldtuple != NULL)
+				{
+					HeapTuple sourceRelationOldTuple = &(change->data.tp.oldtuple->tuple);
+					HeapTuple targetRelationOldTuple = GetTupleForTargetSchema(
+						sourceRelationOldTuple,
+						sourceRelationDesc,
+						targetRelationDesc);
+
+					change->data.tp.oldtuple->tuple = *targetRelationOldTuple;
+				}
+				break;
+			}
+
+			case REORDER_BUFFER_CHANGE_DELETE:
+			{
+				HeapTuple sourceRelationOldTuple = &(change->data.tp.oldtuple->tuple);
+				HeapTuple targetRelationOldTuple = GetTupleForTargetSchema(
+					sourceRelationOldTuple, sourceRelationDesc, targetRelationDesc);
+
+				change->data.tp.oldtuple->tuple = *targetRelationOldTuple;
+				break;
+			}
+
+			/* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */
+			default:
+				ereport(ERROR, errmsg(
+							"Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE",
+							change->action));
+		}
+	}
+
	pgoutputChangeCB(ctx, txn, targetRelation, change);
	RelationClose(targetRelation);
 }
@@ -223,3 +292,51 @@ GetHashValueForIncomingTuple(Relation sourceShardRelation,

	return DatumGetInt32(hashedValueDatum);
 }
+
+
+/*
+ * GetTupleForTargetSchema returns a tuple with the schema of the target relation.
+ * If some columns within the source relations are dropped, we would have to reformat
+ * the tuple to match the schema of the target relation.
+ *
+ * Consider the below scenario:
+ * Session1 : Drop column followed by create_distributed_table_concurrently
+ * Session2 : Concurrent insert workload
+ *
+ * The child shards created by create_distributed_table_concurrently will have less columns
+ * than the source shard because some column were dropped.
+ * The incoming tuple from session2 will have more columns as the writes
+ * happened on source shard. But now the tuple needs to be applied on child shard. So we need to format
+ * it according to child schema.
+ */
+static HeapTuple
+GetTupleForTargetSchema(HeapTuple sourceRelationTuple,
+						TupleDesc sourceRelDesc,
+						TupleDesc targetRelDesc)
+{
+	/* Deform the tuple */
+	Datum *oldValues = (Datum *) palloc0(sourceRelDesc->natts * sizeof(Datum));
+	bool *oldNulls = (bool *) palloc0(sourceRelDesc->natts * sizeof(bool));
+	heap_deform_tuple(sourceRelationTuple, sourceRelDesc, oldValues,
+					  oldNulls);
+
+
+	/* Create new tuple by skipping dropped columns */
+	int nextAttributeIndex = 0;
+	Datum *newValues = (Datum *) palloc0(targetRelDesc->natts * sizeof(Datum));
+	bool *newNulls = (bool *) palloc0(targetRelDesc->natts * sizeof(bool));
+	for (int i = 0; i < sourceRelDesc->natts; i++)
+	{
+		if (TupleDescAttr(sourceRelDesc, i)->attisdropped)
+		{
+			continue;
+		}
+
+		newValues[nextAttributeIndex] = oldValues[i];
+		newNulls[nextAttributeIndex] = oldNulls[i];
+		nextAttributeIndex++;
+	}
+
+	HeapTuple targetRelationTuple = heap_form_tuple(targetRelDesc, newValues, newNulls);
+	return targetRelationTuple;
+}
@@ -751,12 +751,11 @@ MarkRemoteTransactionCritical(struct MultiConnection *connection)

 /*
- * CloseRemoteTransaction handles closing a connection that, potentially, is
- * part of a coordinated transaction. This should only ever be called from
- * connection_management.c, while closing a connection during a transaction.
+ * ResetRemoteTransaction resets the state of the transaction after the end of
+ * the main transaction, if the connection is being reused.
  */
 void
-CloseRemoteTransaction(struct MultiConnection *connection)
+ResetRemoteTransaction(struct MultiConnection *connection)
 {
	RemoteTransaction *transaction = &connection->remoteTransaction;

@@ -767,20 +766,14 @@ CloseRemoteTransaction(struct MultiConnection *connection)

		dlist_delete(&connection->transactionNode);
	}
-}
-
-
-/*
- * ResetRemoteTransaction resets the state of the transaction after the end of
- * the main transaction, if the connection is being reused.
- */
-void
-ResetRemoteTransaction(struct MultiConnection *connection)
-{
-	RemoteTransaction *transaction = &connection->remoteTransaction;

	/* just reset the entire state, relying on 0 being invalid/false */
	memset(transaction, 0, sizeof(*transaction));
+
+	ResetShardPlacementAssociation(connection);
+
+	/* reset copy state */
+	connection->copyBytesWrittenSinceLastFlush = 0;
 }
@@ -340,6 +340,25 @@ SendCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort,
																	   nodeName, nodePort,
																	   nodeUser, NULL);
+
+	SendCommandListToWorkerOutsideTransactionWithConnection(workerConnection,
+															 commandList);
+	CloseConnection(workerConnection);
+}
+
+
+/*
+ * SendCommandListToWorkerOutsideTransactionWithConnection sends the command list
+ * over the specified connection. This opens a new transaction on the
+ * connection, thus it's important that no transaction is currently open.
+ * This function is mainly useful to avoid opening an closing
+ * connections excessively by allowing reusing a single connection to send
+ * multiple separately committing transactions. The function raises an error if
+ * any of the queries fail.
+ */
+void
+SendCommandListToWorkerOutsideTransactionWithConnection(MultiConnection *workerConnection,
+														 List *commandList)
+{
	MarkRemoteTransactionCritical(workerConnection);
	RemoteTransactionBegin(workerConnection);

@@ -351,7 +370,7 @@ SendCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort,
	}

	RemoteTransactionCommit(workerConnection);
-	CloseConnection(workerConnection);
+	ResetRemoteTransaction(workerConnection);
 }

@@ -430,21 +449,18 @@ SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *workerNodeList

 /*
- * SendOptionalCommandListToWorkerOutsideTransaction sends the given command
- * list to the given worker in a single transaction that is outside of the
- * coordinated tranaction. If any of the commands fail, it rollbacks the
- * transaction, and otherwise commits.
+ * SendOptionalCommandListToWorkerOutsideTransactionWithConnection sends the
+ * given command list over a specified connection in a single transaction that
+ * is outside of the coordinated tranaction.
+ *
+ * If any of the commands fail, it rollbacks the transaction, and otherwise commits.
+ * A successful commit is indicated by returning true, and a failed commit by returning
+ * false.
  */
 bool
-SendOptionalCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort,
-												  const char *nodeUser, List *commandList)
+SendOptionalCommandListToWorkerOutsideTransactionWithConnection(
+	MultiConnection *workerConnection, List *commandList)
 {
-	int connectionFlags = FORCE_NEW_CONNECTION;
-	bool failed = false;
-
-	MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags,
-																	   nodeName, nodePort,
-																	   nodeUser, NULL);
	if (PQstatus(workerConnection->pgConn) != CONNECTION_OK)
	{
		return false;

@@ -452,6 +468,7 @@ SendOptionalCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort,
	RemoteTransactionBegin(workerConnection);

	/* iterate over the commands and execute them in the same connection */
+	bool failed = false;
	const char *commandString = NULL;
	foreach_ptr(commandString, commandList)
	{

@@ -471,6 +488,30 @@ SendOptionalCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort,
		RemoteTransactionCommit(workerConnection);
	}

+	ResetRemoteTransaction(workerConnection);
+
+	return !failed;
+}
+
+
+/*
+ * SendOptionalCommandListToWorkerOutsideTransaction sends the given command
+ * list to the given worker in a single transaction that is outside of the
+ * coordinated tranaction. If any of the commands fail, it rollbacks the
+ * transaction, and otherwise commits.
+ */
+bool
+SendOptionalCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort,
+												  const char *nodeUser, List *commandList)
+{
+	int connectionFlags = FORCE_NEW_CONNECTION;
+
+	MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags,
+																	   nodeName, nodePort,
+																	   nodeUser, NULL);
+	bool failed = SendOptionalCommandListToWorkerOutsideTransactionWithConnection(
+		workerConnection,
+		commandList);
	CloseConnection(workerConnection);

	return !failed;
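A minimal sketch of how a caller inside the Citus tree might use the new `...WithConnection` helper to push two separately committing command lists over a single connection instead of reconnecting for each; the header paths, hostname, port, user, and SQL strings below are assumptions for illustration, not values taken from this diff.

```c
/* Illustrative only: reuse one MultiConnection for several outside-transaction command lists. */
#include "postgres.h"

#include "distributed/connection_management.h"   /* assumed header locations inside the Citus tree */
#include "distributed/worker_transaction.h"
#include "nodes/pg_list.h"

static void
ConfigureWorkerExample(void)
{
	/* placeholder worker coordinates, not taken from the diff */
	MultiConnection *connection =
		GetNodeUserDatabaseConnection(OUTSIDE_TRANSACTION, "localhost", 5432,
									  "postgres", NULL);

	/* first transaction: commits on its own */
	SendCommandListToWorkerOutsideTransactionWithConnection(
		connection,
		list_make2("SET LOCAL citus.enable_ddl_propagation TO OFF;",
				   "CREATE SCHEMA IF NOT EXISTS example_schema;"));

	/* second transaction: same connection, separate commit */
	SendCommandListToWorkerOutsideTransactionWithConnection(
		connection,
		list_make1("GRANT USAGE ON SCHEMA example_schema TO PUBLIC;"));

	/* only now give the connection back */
	CloseConnection(connection);
}
```

The helper commits each list and then calls `ResetRemoteTransaction()` rather than closing the connection, which is what makes the second call on the same `MultiConnection` possible.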
@@ -314,6 +314,7 @@ extern Datum columnar_relation_storageid(PG_FUNCTION_ARGS);
 /* write_state_management.c */
 extern ColumnarWriteState * columnar_init_write_state(Relation relation, TupleDesc
													   tupdesc,
+													   Oid tupSlotRelationId,
													   SubTransactionId currentSubXid);
 extern void FlushWriteStateForRelfilenode(Oid relfilenode, SubTransactionId
										   currentSubXid);
@@ -10,9 +10,11 @@
 #ifndef CITUS_SEQUENCE_H
 #define CITUS_SEQUENCE_H

+#include "access/attnum.h"
 #include "nodes/pg_list.h"


+extern bool ColumnDefaultsToNextVal(Oid relationId, AttrNumber attrNumber);
 extern void ExtractDefaultColumnsAndOwnedSequences(Oid relationId,
													List **columnNameList,
													List **ownedSequenceIdList);
@@ -289,6 +289,7 @@ extern MultiConnection * StartNodeConnection(uint32 flags, const char *hostname,
 extern MultiConnection * GetNodeUserDatabaseConnection(uint32 flags, const char *hostname,
														int32 port, const char *user,
														const char *database);
+extern MultiConnection * GetConnectionForLocalQueriesOutsideTransaction(char *userName);
 extern MultiConnection * StartNodeUserDatabaseConnection(uint32 flags,
														  const char *hostname,
														  int32 port,
@@ -56,7 +56,7 @@ typedef struct RelationRestriction
 {
	Index index;
	Oid relationId;
-	bool distributedRelation;
+	bool citusTable;
	RangeTblEntry *rte;
	RelOptInfo *relOptInfo;
	PlannerInfo *plannerInfo;
@@ -157,7 +157,8 @@ extern void DropPublications(MultiConnection *sourceConnection,
 extern void DropAllLogicalReplicationLeftovers(LogicalRepType type);

 extern char * PublicationName(LogicalRepType type, uint32_t nodeId, Oid ownerId);
-extern char * ReplicationSlotName(LogicalRepType type, uint32_t nodeId, Oid ownerId);
+extern char * ReplicationSlotNameForNodeAndOwner(LogicalRepType type, uint32_t nodeId, Oid
+												 ownerId);
 extern char * SubscriptionName(LogicalRepType type, Oid ownerId);
 extern char * SubscriptionRoleName(LogicalRepType type, Oid ownerId);
@@ -172,10 +173,6 @@ extern HTAB * CreateGroupedLogicalRepTargetsHash(List *subscriptionInfoList);
 extern void CreateGroupedLogicalRepTargetsConnections(HTAB *groupedLogicalRepTargetsHash,
													   char *user,
													   char *databaseName);
-extern void RecreateGroupedLogicalRepTargetsConnections(
-	HTAB *groupedLogicalRepTargetsHash,
-	char *user,
-	char *databaseName);
 extern void CloseGroupedLogicalRepTargetsConnections(HTAB *groupedLogicalRepTargetsHash);
 extern void CompleteNonBlockingShardTransfer(List *shardList,
											  MultiConnection *sourceConnection,
@@ -130,7 +130,6 @@ extern void MarkRemoteTransactionCritical(struct MultiConnection *connection);
  * transaction managment code.
  */

-extern void CloseRemoteTransaction(struct MultiConnection *connection);
 extern void ResetRemoteTransaction(struct MultiConnection *connection);

 /* perform handling for all in-progress transactions */
@@ -103,13 +103,13 @@ extern void InsertCleanupRecordInSubtransaction(CleanupObject objectType,
  * completion on failure. This will trigger cleanup of appropriate resources
  * and cleanup records.
  */
-extern void FinalizeOperationNeedingCleanupOnFailure(void);
+extern void FinalizeOperationNeedingCleanupOnFailure(const char *operationName);

 /*
  * FinalizeOperationNeedingCleanupOnSuccess is be called by an operation to signal
  * completion on success. This will trigger cleanup of appropriate resources
  * and cleanup records.
  */
-extern void FinalizeOperationNeedingCleanupOnSuccess(void);
+extern void FinalizeOperationNeedingCleanupOnSuccess(const char *operationName);

 #endif /*CITUS_SHARD_CLEANER_H */
@@ -12,6 +12,7 @@
 #ifndef WORKER_TRANSACTION_H
 #define WORKER_TRANSACTION_H

+#include "distributed/connection_management.h"
 #include "distributed/worker_manager.h"
 #include "storage/lockdefs.h"

@@ -59,6 +60,10 @@ extern bool SendOptionalCommandListToWorkerOutsideTransaction(const char *nodeName,
															   int32 nodePort,
															   const char *nodeUser,
															   List *commandList);
+extern bool SendOptionalCommandListToWorkerOutsideTransactionWithConnection(
+	MultiConnection *workerConnection,
+	List *
+	commandList);
 extern bool SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(const
																			 char *nodeName,
																			 int32 nodePort,

@@ -74,6 +79,9 @@ extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName,
													   int32 nodePort,
													   const char *nodeUser,
													   List *commandList);
+extern void SendCommandListToWorkerOutsideTransactionWithConnection(
+	MultiConnection *workerConnection,
+	List *commandList);
 extern void SendMetadataCommandListToWorkerListInCoordinatedTransaction(
	List *workerNodeList,
	const char *
@@ -6,7 +6,9 @@ test: isolation_setup
 test: isolation_cluster_management

 test: isolation_logical_replication_single_shard_commands
+test: isolation_logical_replication_nonsu_nonbypassrls
 test: isolation_logical_replication_multi_shard_commands
 test: isolation_non_blocking_shard_split
+test: isolation_create_distributed_concurrently_after_drop_column
 test: isolation_non_blocking_shard_split_with_index_as_replicaIdentity
 test: isolation_non_blocking_shard_split_fkey
@@ -100,6 +100,161 @@ SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM
 dist_table
 (3 rows)

+-- right now dist_table has columns a, b, dist_column is b, it has 6 shards
+-- column cache is: a pos 1, b pos 2
+-- let's add another column
+ALTER TABLE dist_table ADD COLUMN c int DEFAULT 1;
+-- right now column cache is: a pos 1, b pos 2, c pos 3
+-- test using alter_distributed_table to change shard count after dropping one column
+ALTER TABLE dist_table DROP COLUMN a;
+-- right now column cache is: a pos 1 attisdropped=true, b pos 2, c pos 3
+-- let's try changing the shard count
+SELECT alter_distributed_table('dist_table', shard_count := 7, cascade_to_colocated := false);
+NOTICE: creating a new table for alter_distributed_table.dist_table
+NOTICE: moving the data of alter_distributed_table.dist_table
+NOTICE: dropping the old alter_distributed_table.dist_table
+NOTICE: renaming the new table to alter_distributed_table.dist_table
+ alter_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- right now column cache is: b pos 1, c pos 2 because a new table has been created
+-- check that b is still distribution column
+SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
+	WHERE table_name = 'dist_table'::regclass;
+ table_name | citus_table_type | distribution_column | shard_count
+---------------------------------------------------------------------
+ dist_table | distributed      | b                   | 7
+(1 row)
+
+-- let's add another column
+ALTER TABLE dist_table ADD COLUMN d int DEFAULT 2;
+-- right now column cache is: b pos 1, c pos 2, d pos 3, dist_column is b
+-- test using alter_distributed_table to change dist. column after dropping one column
+ALTER TABLE dist_table DROP COLUMN c;
+-- right now column cache is: b pos 1, c pos 2 attisdropped=true, d pos 3
+-- let's try changing the distribution column
+SELECT alter_distributed_table('dist_table', distribution_column := 'd');
+NOTICE: creating a new table for alter_distributed_table.dist_table
+NOTICE: moving the data of alter_distributed_table.dist_table
+NOTICE: dropping the old alter_distributed_table.dist_table
+NOTICE: renaming the new table to alter_distributed_table.dist_table
+ alter_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- right now column cache is: b pos 1, d pos 2 because a new table has been created
+-- check that d is the distribution column
+SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
+	WHERE table_name = 'dist_table'::regclass;
+ table_name | citus_table_type | distribution_column | shard_count
+---------------------------------------------------------------------
+ dist_table | distributed      | d                   | 7
+(1 row)
+
+-- add another column and undistribute
+ALTER TABLE dist_table ADD COLUMN e int DEFAULT 3;
+SELECT undistribute_table('dist_table');
+NOTICE: creating a new table for alter_distributed_table.dist_table
+NOTICE: moving the data of alter_distributed_table.dist_table
+NOTICE: dropping the old alter_distributed_table.dist_table
+NOTICE: renaming the new table to alter_distributed_table.dist_table
+ undistribute_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- right now column cache is: b pos 1, d pos 2, e pos 3, table is not Citus table
+-- try dropping column and then distributing
+ALTER TABLE dist_table DROP COLUMN b;
+-- right now column cache is: b pos 1 attisdropped=true, d pos 2, e pos 3
+-- distribute with d
+SELECT create_distributed_table ('dist_table', 'd', colocate_with := 'none');
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$alter_distributed_table.dist_table$$)
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- check that d is the distribution column
+SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
+	WHERE table_name = 'dist_table'::regclass;
+ table_name | citus_table_type | distribution_column | shard_count
+---------------------------------------------------------------------
+ dist_table | distributed      | d                   | 4
+(1 row)
+
+-- alter distribution column to e
+SELECT alter_distributed_table('dist_table', distribution_column := 'e');
+NOTICE: creating a new table for alter_distributed_table.dist_table
+NOTICE: moving the data of alter_distributed_table.dist_table
+NOTICE: dropping the old alter_distributed_table.dist_table
+NOTICE: renaming the new table to alter_distributed_table.dist_table
+ alter_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- right now column cache is: d pos 1, e pos 2
+-- check that e is the distribution column
+SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
+	WHERE table_name = 'dist_table'::regclass;
+ table_name | citus_table_type | distribution_column | shard_count
+---------------------------------------------------------------------
+ dist_table | distributed      | e                   | 4
+(1 row)
+
+ALTER TABLE dist_table ADD COLUMN a int DEFAULT 4;
+ALTER TABLE dist_table ADD COLUMN b int DEFAULT 5;
+-- right now column cache is: d pos 1, e pos 2, a pos 3, b pos 4
+-- alter distribution column to a
+SELECT alter_distributed_table('dist_table', distribution_column := 'a');
+NOTICE: creating a new table for alter_distributed_table.dist_table
+NOTICE: moving the data of alter_distributed_table.dist_table
+NOTICE: dropping the old alter_distributed_table.dist_table
+NOTICE: renaming the new table to alter_distributed_table.dist_table
+ alter_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- right now column cache hasn't changed
+-- check that a is the distribution column
+SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
+	WHERE table_name = 'dist_table'::regclass;
+ table_name | citus_table_type | distribution_column | shard_count
+---------------------------------------------------------------------
+ dist_table | distributed      | a                   | 4
+(1 row)
+
+ALTER TABLE dist_table DROP COLUMN d;
+ALTER TABLE dist_table DROP COLUMN e;
+-- right now column cache is: d pos 1 attisdropped=true, e pos 2 attisdropped=true, a pos 3, b pos 4
+-- alter distribution column to b
+SELECT alter_distributed_table('dist_table', distribution_column := 'b');
+NOTICE: creating a new table for alter_distributed_table.dist_table
+NOTICE: moving the data of alter_distributed_table.dist_table
+NOTICE: dropping the old alter_distributed_table.dist_table
+NOTICE: renaming the new table to alter_distributed_table.dist_table
+ alter_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- column cache is: a pos 1, b pos 2 -> configuration with which we started these drop column tests
+-- check that b is the distribution column
+SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
+	WHERE table_name = 'dist_table'::regclass;
+ table_name | citus_table_type | distribution_column | shard_count
+---------------------------------------------------------------------
+ dist_table | distributed      | b                   | 4
+(1 row)
+
 -- test altering colocation, note that shard count will also change
 SELECT alter_distributed_table('dist_table', colocate_with := 'alter_distributed_table.colocation_table');
 NOTICE: creating a new table for alter_distributed_table.dist_table
@@ -319,5 +319,42 @@ SELECT COUNT(*) FROM public.test_search_path;
 (1 row)

 ALTER USER current_user RESET search_path;
+-- test empty/null password: it is treated the same as no password
+SET password_encryption TO md5;
+CREATE ROLE new_role;
+SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_password FROM run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'new_role'$$) workers, pg_authid WHERE pg_authid.rolname = 'new_role';
+ worker_password | coord_password
+---------------------------------------------------------------------
+                 |
+                 |
+(2 rows)
+
+ALTER ROLE new_role PASSWORD '';
+NOTICE: empty string is not a valid password, clearing password
+SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_password FROM run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'new_role'$$) workers, pg_authid WHERE pg_authid.rolname = 'new_role';
+ worker_password | coord_password
+---------------------------------------------------------------------
+                 |
+                 |
+(2 rows)
+
+ALTER ROLE new_role PASSWORD 'new_password';
+SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_password, workers.result = pg_authid.rolpassword AS password_is_same FROM run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'new_role'$$) workers, pg_authid WHERE pg_authid.rolname = 'new_role';
+ worker_password | coord_password | password_is_same
+---------------------------------------------------------------------
+ md51a28da0f1a2416525eec435bdce8cbbe | md51a28da0f1a2416525eec435bdce8cbbe | t
+ md51a28da0f1a2416525eec435bdce8cbbe | md51a28da0f1a2416525eec435bdce8cbbe | t
+(2 rows)
+
+ALTER ROLE new_role PASSWORD NULL;
+SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_password FROM run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'new_role'$$) workers, pg_authid WHERE pg_authid.rolname = 'new_role';
+ worker_password | coord_password
+---------------------------------------------------------------------
+                 |
+                 |
+(2 rows)
+
+RESET password_encryption;
+DROP ROLE new_role;
 DROP TABLE test_search_path;
 DROP SCHEMA alter_role, ",CitUs,.TeeN!?", test_sp CASCADE;
@@ -176,5 +176,39 @@ SELECT citus_rebalance_wait();

 (1 row)

+DROP TABLE t1;
+-- make sure a non-super user can stop rebalancing
+CREATE USER non_super_user_rebalance WITH LOGIN;
+GRANT ALL ON SCHEMA background_rebalance TO non_super_user_rebalance;
+SET ROLE non_super_user_rebalance;
+CREATE TABLE non_super_user_t1 (a int PRIMARY KEY);
+SELECT create_distributed_table('non_super_user_t1', 'a', shard_count => 4, colocate_with => 'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_move_shard_placement(85674008, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode => 'block_writes');
+ citus_move_shard_placement
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT 1 FROM citus_rebalance_start();
+NOTICE: Scheduled 1 moves as job xxx
+DETAIL: Rebalance scheduled as background job
+HINT: To monitor progress, run: SELECT * FROM pg_dist_background_task WHERE job_id = xxx ORDER BY task_id ASC; or SELECT * FROM get_rebalance_progress();
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+SELECT citus_rebalance_stop();
+ citus_rebalance_stop
+---------------------------------------------------------------------
+
+(1 row)
+
+RESET ROLE;
 SET client_min_messages TO WARNING;
 DROP SCHEMA background_rebalance CASCADE;
@@ -232,6 +232,30 @@ SELECT * FROM citus_local_table_triggers
  truncate_trigger_xxxxxxx | "interesting!schema"."citus_local!_table" | O
 (3 rows)

+-- ALTER TABLE ENABLE REPLICA trigger
+ALTER TABLE "interesting!schema"."citus_local!_table" ENABLE REPLICA TRIGGER "trigger\'name22";
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1507008, 'interesting!schema', E'ALTER TABLE "interesting!schema"."citus_local!_table" ENABLE REPLICA TRIGGER "trigger\\''name22";')
+SELECT * FROM citus_local_table_triggers
+WHERE tgname NOT LIKE 'RI_ConstraintTrigger%';
+ tgname | tgrelid | tgenabled
+---------------------------------------------------------------------
+ trigger\'name22          | "interesting!schema"."citus_local!_table"         | R
+ trigger\'name22_1507008  | "interesting!schema"."citus_local!_table_1507008" | R
+ truncate_trigger_xxxxxxx | "interesting!schema"."citus_local!_table"         | O
+(3 rows)
+
+-- ALTER TABLE ENABLE ALWAYS trigger
+ALTER TABLE "interesting!schema"."citus_local!_table" ENABLE ALWAYS TRIGGER "trigger\'name22";
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1507008, 'interesting!schema', E'ALTER TABLE "interesting!schema"."citus_local!_table" ENABLE ALWAYS TRIGGER "trigger\\''name22";')
+SELECT * FROM citus_local_table_triggers
+WHERE tgname NOT LIKE 'RI_ConstraintTrigger%';
+ tgname | tgrelid | tgenabled
+---------------------------------------------------------------------
+ trigger\'name22          | "interesting!schema"."citus_local!_table"         | A
+ trigger\'name22_1507008  | "interesting!schema"."citus_local!_table_1507008" | A
+ truncate_trigger_xxxxxxx | "interesting!schema"."citus_local!_table"         | O
+(3 rows)
+
 -- ALTER TABLE DISABLE trigger
 ALTER TABLE "interesting!schema"."citus_local!_table" DISABLE TRIGGER "trigger\'name22";
 NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1507008, 'interesting!schema', E'ALTER TABLE "interesting!schema"."citus_local!_table" DISABLE TRIGGER "trigger\\''name22";')
@@ -40,7 +40,13 @@ SELECT 1 FROM master_remove_node('localhost', :master_port);
 CREATE TABLE citus_local_table_1 (a int primary key);
 -- this should fail as coordinator is removed from pg_dist_node
 SELECT citus_add_local_table_to_metadata('citus_local_table_1');
-ERROR: could not find the coordinator node in metadata as it is not added as a worker
+ERROR: operation is not allowed when coordinator is not added into metadata
+-- This should also fail as coordinator is removed from pg_dist_node.
+--
+-- This is not a great place to test this but is one of those places that we
+-- have workers in metadata but not the coordinator.
+SELECT create_distributed_table_concurrently('citus_local_table_1', 'a');
+ERROR: operation is not allowed when coordinator is not added into metadata
 -- let coordinator have citus local tables again for next tests
 set client_min_messages to ERROR;
 SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
@@ -146,9 +146,12 @@ SELECT master_get_table_ddl_events('test_table');
  CREATE TABLE table_triggers_schema.test_table (id integer, text_number text, text_col text)
  ALTER TABLE table_triggers_schema.test_table OWNER TO postgres
  CREATE TRIGGER test_table_delete AFTER DELETE ON table_triggers_schema.test_table FOR EACH STATEMENT EXECUTE FUNCTION table_triggers_schema.test_table_trigger_function()
+ ALTER TABLE table_triggers_schema.test_table ENABLE TRIGGER test_table_delete;
  CREATE CONSTRAINT TRIGGER test_table_insert AFTER INSERT ON table_triggers_schema.test_table DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW WHEN (((new.id > 5) OR ((new.text_col IS NOT NULL) AND ((new.id)::numeric < to_number(new.text_number, '9999'::text))))) EXECUTE FUNCTION table_triggers_schema.test_table_trigger_function()
+ ALTER TABLE table_triggers_schema.test_table ENABLE TRIGGER test_table_insert;
  CREATE CONSTRAINT TRIGGER test_table_update AFTER UPDATE OF id ON table_triggers_schema.test_table NOT DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW WHEN (((NOT (old.* IS DISTINCT FROM new.*)) AND (old.text_number IS NOT NULL))) EXECUTE FUNCTION table_triggers_schema.test_table_trigger_function()
-(5 rows)
+ ALTER TABLE table_triggers_schema.test_table ENABLE TRIGGER test_table_update;
+(8 rows)

 -- cleanup at exit
 DROP SCHEMA table_triggers_schema CASCADE;
@@ -50,3 +50,28 @@ SELECT * FROM test_alter_table ORDER BY a;
 (4 rows)

 DROP TABLE test_alter_table;
+-- Make sure that the correct table options are used when rewriting the table.
+-- This is reflected by the VACUUM VERBOSE output right after a rewrite showing
+-- that all chunks are compressed with the configured compression algorithm
+-- https://github.com/citusdata/citus/issues/5927
+CREATE TABLE test(i int) USING columnar;
+ALTER TABLE test SET (columnar.compression = lz4);
+INSERT INTO test VALUES(1);
+VACUUM VERBOSE test;
+INFO: statistics for "test":
+storage id: xxxxx
+total file size: 24576, total data size: 6
+compression rate: 0.83x
+total row count: 1, stripe count: 1, average rows per stripe: 1
+chunk count: 1, containing data for dropped columns: 0, lz4 compressed: 1
+
+ALTER TABLE test ALTER COLUMN i TYPE int8;
+VACUUM VERBOSE test;
+INFO: statistics for "test":
+storage id: xxxxx
+total file size: 24576, total data size: 10
+compression rate: 0.90x
+total row count: 1, stripe count: 1, average rows per stripe: 1
+chunk count: 1, containing data for dropped columns: 0, lz4 compressed: 1
+
+DROP TABLE test;
@@ -57,6 +57,35 @@ ERROR: cannot colocate tables nocolo and test
 DETAIL: Distribution column types don't match for nocolo and test.
 select create_distributed_table_concurrently('test','key', colocate_with := 'noexists');
 ERROR: relation "noexists" does not exist
+select citus_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', false);
+ citus_set_node_property
+---------------------------------------------------------------------
+
+(1 row)
+
+select citus_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
+ citus_set_node_property
+---------------------------------------------------------------------
+
+(1 row)
+
+select create_distributed_table_concurrently('test','key');
+NOTICE: relation test does not have a REPLICA IDENTITY or PRIMARY KEY
+DETAIL: UPDATE and DELETE commands on the relation will error out during create_distributed_table_concurrently unless there is a REPLICA IDENTITY or PRIMARY KEY. INSERT commands will still work.
+ERROR: no worker nodes are available for placing shards
+HINT: Add more worker nodes.
+select citus_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', true);
+ citus_set_node_property
+---------------------------------------------------------------------
+
+(1 row)
+
+select citus_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
+ citus_set_node_property
+---------------------------------------------------------------------
+
+(1 row)
+
 -- use colocate_with "default"
 select create_distributed_table_concurrently('test','key', shard_count := 11);
 NOTICE: relation test does not have a REPLICA IDENTITY or PRIMARY KEY
@@ -463,29 +463,6 @@ DEBUG: Creating router plan
(5 rows)

\set VERBOSITY default
-- enable_group_by_reordering is a new GUC introduced in PG15
-- it does some optimization of the order of group by keys which results
-- in a different explain output plan between PG13/14 and PG15
-- Hence we set that GUC to off.
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
SET enable_group_by_reordering TO off;
\endif
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM SET enable_group_by_reordering TO off;$$);
 ?column?
---------------------------------------------------------------------
 1
(1 row)

SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,t)
 (localhost,57638,t,t)
(2 rows)

EXPLAIN (COSTS OFF) WITH cte_1 AS NOT MATERIALIZED (SELECT * FROM test_table)
SELECT
 count(*)
@@ -524,22 +501,6 @@ DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823]
 -> Seq Scan on test_table_1960000 test_table_1
(12 rows)

\if :server_version_ge_15
RESET enable_group_by_reordering;
\endif
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM RESET enable_group_by_reordering;$$);
 ?column?
---------------------------------------------------------------------
 1
(1 row)

SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,t)
 (localhost,57638,t,t)
(2 rows)

-- ctes with volatile functions are not
-- inlined
WITH cte_1 AS (SELECT *, random() FROM test_table)
@ -855,7 +855,145 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_trigger WHERE tgname lik
|
||||||
(localhost,57638,t,1)
|
(localhost,57638,t,1)
|
||||||
(2 rows)
|
(2 rows)
|
||||||
|
|
||||||
RESET client_min_messages;
|
CREATE TABLE "dist_\'table"(a int);
|
||||||
|
CREATE FUNCTION trigger_func()
|
||||||
|
RETURNS trigger
|
||||||
|
LANGUAGE plpgsql
|
||||||
|
AS $function$
|
||||||
|
BEGIN
|
||||||
|
RETURN NULL;
|
||||||
|
END;
|
||||||
|
$function$;
|
||||||
|
CREATE TRIGGER default_mode_trigger
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
CREATE TRIGGER "disabled_trigger\'"
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
ALTER TABLE "dist_\'table" DISABLE trigger "disabled_trigger\'";
|
||||||
|
CREATE TRIGGER replica_trigger
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
ALTER TABLE "dist_\'table" ENABLE REPLICA trigger replica_trigger;
|
||||||
|
CREATE TRIGGER always_enabled_trigger
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
ALTER TABLE "dist_\'table" ENABLE ALWAYS trigger always_enabled_trigger;
|
||||||
|
CREATE TRIGGER noop_enabled_trigger
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
ALTER TABLE "dist_\'table" ENABLE trigger noop_enabled_trigger;
|
||||||
|
SELECT create_distributed_table('dist_\''table', 'a');
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'O') FROM pg_trigger WHERE tgname LIKE 'default_mode_trigger%';
|
||||||
|
bool_and
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'O') FROM pg_trigger WHERE tgname LIKE 'default_mode_trigger%'$$);
|
||||||
|
run_command_on_workers
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(localhost,57637,t,t)
|
||||||
|
(localhost,57638,t,t)
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'D') FROM pg_trigger WHERE tgname LIKE 'disabled_trigger%';
|
||||||
|
bool_and
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'D') FROM pg_trigger WHERE tgname LIKE 'disabled_trigger%'$$);
|
||||||
|
run_command_on_workers
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(localhost,57637,t,t)
|
||||||
|
(localhost,57638,t,t)
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'R') FROM pg_trigger WHERE tgname LIKE 'replica_trigger%';
|
||||||
|
bool_and
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'R') FROM pg_trigger WHERE tgname LIKE 'replica_trigger%'$$);
|
||||||
|
run_command_on_workers
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(localhost,57637,t,t)
|
||||||
|
(localhost,57638,t,t)
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'A') FROM pg_trigger WHERE tgname LIKE 'always_enabled_trigger%';
|
||||||
|
bool_and
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'A') FROM pg_trigger WHERE tgname LIKE 'always_enabled_trigger%'$$);
|
||||||
|
run_command_on_workers
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(localhost,57637,t,t)
|
||||||
|
(localhost,57638,t,t)
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'O') FROM pg_trigger WHERE tgname LIKE 'noop_enabled_trigger%';
|
||||||
|
bool_and
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'O') FROM pg_trigger WHERE tgname LIKE 'noop_enabled_trigger%'$$);
|
||||||
|
run_command_on_workers
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(localhost,57637,t,t)
|
||||||
|
(localhost,57638,t,t)
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
CREATE TABLE citus_local(a int);
|
||||||
|
CREATE FUNCTION citus_local_trig_func()
|
||||||
|
RETURNS trigger
|
||||||
|
LANGUAGE plpgsql
|
||||||
|
AS $function$
|
||||||
|
BEGIN
|
||||||
|
RETURN NULL;
|
||||||
|
END;
|
||||||
|
$function$;
|
||||||
|
CREATE TRIGGER citus_local_trig
|
||||||
|
AFTER UPDATE OR DELETE ON citus_local
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION citus_local_trig_func();
|
||||||
|
-- make sure that trigger is initially not disabled
|
||||||
|
SELECT tgenabled = 'D' FROM pg_trigger WHERE tgname LIKE 'citus_local_trig%';
|
||||||
|
?column?
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
f
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
ALTER TABLE citus_local DISABLE trigger citus_local_trig;
|
||||||
|
SELECT citus_add_local_table_to_metadata('citus_local');
|
||||||
|
citus_add_local_table_to_metadata
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'D') FROM pg_trigger WHERE tgname LIKE 'citus_local_trig%';
|
||||||
|
bool_and
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'D') FROM pg_trigger WHERE tgname LIKE 'citus_local_trig%'$$);
|
||||||
|
run_command_on_workers
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(localhost,57637,t,t)
|
||||||
|
(localhost,57638,t,t)
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
SET client_min_messages TO ERROR;
|
||||||
RESET citus.enable_unsafe_triggers;
|
RESET citus.enable_unsafe_triggers;
|
||||||
SELECT run_command_on_workers('ALTER SYSTEM RESET citus.enable_unsafe_triggers;');
|
SELECT run_command_on_workers('ALTER SYSTEM RESET citus.enable_unsafe_triggers;');
|
||||||
run_command_on_workers
|
run_command_on_workers
|
||||||
|
@@ -873,25 +1011,3 @@ SELECT run_command_on_workers('SELECT pg_reload_conf();');

SET citus.log_remote_commands TO off;
DROP SCHEMA distributed_triggers CASCADE;
NOTICE: drop cascades to 21 other objects
DETAIL: drop cascades to table data
drop cascades to function record_change()
drop cascades to function insert_delete_document(text,text)
drop cascades to function bad_shardkey_record_change()
drop cascades to function remote_shardkey_record_change()
drop cascades to function insert_document(text,text)
drop cascades to table emptest
drop cascades to table emptest_audit
drop cascades to function process_emp_audit()
drop cascades to view emp_triggers
drop cascades to table record_op
drop cascades to function record_emp()
drop cascades to table data_changes
drop cascades to table sale
drop cascades to table record_sale
drop cascades to function record_sale()
drop cascades to view sale_triggers
drop cascades to extension seg
drop cascades to table distributed_table
drop cascades to table distributed_table_change
drop cascades to function insert_99()
@@ -41,8 +41,13 @@ SELECT * FROM shards_in_workers;
 103 | worker1
(4 rows)

-- failure on creating the subscription
-- Failure on creating the subscription
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()');
-- Failing exactly on CREATE SUBSCRIPTION is causing flaky test where we fail with either:
-- 1) ERROR: connection to the remote node localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist
-- another command is already in progress
-- 2) ERROR: connection to the remote node localhost:xxxxx failed with the following error: another command is already in progress
-- Instead fail on the next step (ALTER SUBSCRIPTION) instead which is also required logically as part of uber CREATE SUBSCRIPTION operation.
SELECT citus.mitmproxy('conn.onQuery(query="ALTER SUBSCRIPTION").kill()');
 mitmproxy
---------------------------------------------------------------------

@@ -239,7 +239,7 @@ master_remove_node
step s2-create-citus-local-table-1: SELECT citus_add_local_table_to_metadata('citus_local_table_1'); <waiting ...>
step s1-commit: COMMIT;
step s2-create-citus-local-table-1: <... completed>
ERROR: could not find the coordinator node in metadata as it is not added as a worker
ERROR: operation is not allowed when coordinator is not added into metadata
step s2-commit: COMMIT;
master_remove_node
---------------------------------------------------------------------
@ -0,0 +1,667 @@
|
||||||
|
Parsed test spec with 3 sessions
|
||||||
|
|
||||||
|
starting permutation: s2-print-cluster-1 s3-acquire-advisory-lock s2-begin s1-alter-table s1-set-factor-1 s1-create-distributed-table-observations_with_pk-concurrently s2-insert-observations_with_pk s2-update-observations_with_pk s2-end s2-print-cluster-1 s3-release-advisory-lock s2-print-cluster-1
|
||||||
|
step s2-print-cluster-1:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
tenant_id|dummy|measurement_id|payload|observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
step s3-acquire-advisory-lock:
|
||||||
|
SELECT pg_advisory_lock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_lock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-begin:
|
||||||
|
BEGIN;
|
||||||
|
|
||||||
|
step s1-alter-table:
|
||||||
|
ALTER TABLE observations_with_pk DROP COLUMN dummy;
|
||||||
|
ALTER TABLE observations_with_full_replica_identity DROP COLUMN dummy;
|
||||||
|
|
||||||
|
step s1-set-factor-1:
|
||||||
|
SET citus.shard_replication_factor TO 1;
|
||||||
|
SELECT citus_set_coordinator_host('localhost');
|
||||||
|
|
||||||
|
citus_set_coordinator_host
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations_with_pk-concurrently:
|
||||||
|
SELECT create_distributed_table_concurrently('observations_with_pk','tenant_id');
|
||||||
|
<waiting ...>
|
||||||
|
step s2-insert-observations_with_pk:
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
|
||||||
|
step s2-update-observations_with_pk:
|
||||||
|
UPDATE observations_with_pk set observation_time='03/11/2019 02:00:00'::TIMESTAMP where tenant_id = 'tenant_id' and measurement_id = 3;
|
||||||
|
|
||||||
|
step s2-end:
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
step s2-print-cluster-1:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57636|1500004|t | 4
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 3|{"name": 29.3}|Mon Mar 11 02:00:00 2019 PDT
|
||||||
|
tenant_id| 4|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
step s3-release-advisory-lock:
|
||||||
|
SELECT pg_advisory_unlock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_unlock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations_with_pk-concurrently: <... completed>
|
||||||
|
create_distributed_table_concurrently
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-print-cluster-1:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57637|1500006|t | 4
|
||||||
|
57637|1500008|t | 0
|
||||||
|
57638|1500005|t | 0
|
||||||
|
57638|1500007|t | 0
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 3|{"name": 29.3}|Mon Mar 11 02:00:00 2019 PDT
|
||||||
|
tenant_id| 4|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
|
||||||
|
starting permutation: s2-print-cluster-1 s3-acquire-advisory-lock s2-begin s1-alter-table s1-set-factor-1 s1-create-distributed-table-observations_with_pk-concurrently s2-insert-observations_with_pk s2-update-primary-key-observations_with_pk s2-end s2-print-cluster-1 s3-release-advisory-lock s2-print-cluster-1
|
||||||
|
step s2-print-cluster-1:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
tenant_id|dummy|measurement_id|payload|observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
step s3-acquire-advisory-lock:
|
||||||
|
SELECT pg_advisory_lock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_lock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-begin:
|
||||||
|
BEGIN;
|
||||||
|
|
||||||
|
step s1-alter-table:
|
||||||
|
ALTER TABLE observations_with_pk DROP COLUMN dummy;
|
||||||
|
ALTER TABLE observations_with_full_replica_identity DROP COLUMN dummy;
|
||||||
|
|
||||||
|
step s1-set-factor-1:
|
||||||
|
SET citus.shard_replication_factor TO 1;
|
||||||
|
SELECT citus_set_coordinator_host('localhost');
|
||||||
|
|
||||||
|
citus_set_coordinator_host
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations_with_pk-concurrently:
|
||||||
|
SELECT create_distributed_table_concurrently('observations_with_pk','tenant_id');
|
||||||
|
<waiting ...>
|
||||||
|
step s2-insert-observations_with_pk:
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
|
||||||
|
step s2-update-primary-key-observations_with_pk:
|
||||||
|
UPDATE observations_with_pk set measurement_id=100 where tenant_id = 'tenant_id' and measurement_id = 4 ;
|
||||||
|
|
||||||
|
step s2-end:
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
step s2-print-cluster-1:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57636|1500009|t | 4
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 3|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 100|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
step s3-release-advisory-lock:
|
||||||
|
SELECT pg_advisory_unlock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_unlock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations_with_pk-concurrently: <... completed>
|
||||||
|
create_distributed_table_concurrently
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-print-cluster-1:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57637|1500011|t | 4
|
||||||
|
57637|1500013|t | 0
|
||||||
|
57638|1500010|t | 0
|
||||||
|
57638|1500012|t | 0
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 3|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 100|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
|
||||||
|
starting permutation: s2-print-cluster-1 s3-acquire-advisory-lock s2-begin s1-alter-table s1-set-factor-1 s1-create-distributed-table-observations_with_pk-concurrently s2-insert-observations_with_pk s2-update-observations_with_pk s2-delete-observations_with_pk s2-end s2-print-cluster-1 s3-release-advisory-lock s2-print-cluster-1
|
||||||
|
step s2-print-cluster-1:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
tenant_id|dummy|measurement_id|payload|observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
step s3-acquire-advisory-lock:
|
||||||
|
SELECT pg_advisory_lock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_lock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-begin:
|
||||||
|
BEGIN;
|
||||||
|
|
||||||
|
step s1-alter-table:
|
||||||
|
ALTER TABLE observations_with_pk DROP COLUMN dummy;
|
||||||
|
ALTER TABLE observations_with_full_replica_identity DROP COLUMN dummy;
|
||||||
|
|
||||||
|
step s1-set-factor-1:
|
||||||
|
SET citus.shard_replication_factor TO 1;
|
||||||
|
SELECT citus_set_coordinator_host('localhost');
|
||||||
|
|
||||||
|
citus_set_coordinator_host
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations_with_pk-concurrently:
|
||||||
|
SELECT create_distributed_table_concurrently('observations_with_pk','tenant_id');
|
||||||
|
<waiting ...>
|
||||||
|
step s2-insert-observations_with_pk:
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
|
||||||
|
step s2-update-observations_with_pk:
|
||||||
|
UPDATE observations_with_pk set observation_time='03/11/2019 02:00:00'::TIMESTAMP where tenant_id = 'tenant_id' and measurement_id = 3;
|
||||||
|
|
||||||
|
step s2-delete-observations_with_pk:
|
||||||
|
DELETE FROM observations_with_pk where tenant_id = 'tenant_id' and measurement_id = 3 ;
|
||||||
|
|
||||||
|
step s2-end:
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
step s2-print-cluster-1:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57636|1500014|t | 3
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 4|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
(3 rows)
|
||||||
|
|
||||||
|
step s3-release-advisory-lock:
|
||||||
|
SELECT pg_advisory_unlock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_unlock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations_with_pk-concurrently: <... completed>
|
||||||
|
create_distributed_table_concurrently
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-print-cluster-1:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57637|1500016|t | 3
|
||||||
|
57637|1500018|t | 0
|
||||||
|
57638|1500015|t | 0
|
||||||
|
57638|1500017|t | 0
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 4|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
(3 rows)
|
||||||
|
|
||||||
|
|
||||||
|
starting permutation: s2-print-cluster-2 s3-acquire-advisory-lock s2-begin s1-alter-table s1-set-factor-1 s1-create-distributed-table-observations-2-concurrently s2-insert-observations_with_full_replica_identity s2-update-observations_with_full_replica_identity s2-end s2-print-cluster-2 s3-release-advisory-lock s2-print-cluster-2
|
||||||
|
step s2-print-cluster-2:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_full_replica_identity', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_full_replica_identity
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
tenant_id|dummy|measurement_id|payload|observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
step s3-acquire-advisory-lock:
|
||||||
|
SELECT pg_advisory_lock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_lock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-begin:
|
||||||
|
BEGIN;
|
||||||
|
|
||||||
|
step s1-alter-table:
|
||||||
|
ALTER TABLE observations_with_pk DROP COLUMN dummy;
|
||||||
|
ALTER TABLE observations_with_full_replica_identity DROP COLUMN dummy;
|
||||||
|
|
||||||
|
step s1-set-factor-1:
|
||||||
|
SET citus.shard_replication_factor TO 1;
|
||||||
|
SELECT citus_set_coordinator_host('localhost');
|
||||||
|
|
||||||
|
citus_set_coordinator_host
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations-2-concurrently:
|
||||||
|
SELECT create_distributed_table_concurrently('observations_with_full_replica_identity','tenant_id');
|
||||||
|
<waiting ...>
|
||||||
|
step s2-insert-observations_with_full_replica_identity:
|
||||||
|
INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
|
||||||
|
step s2-update-observations_with_full_replica_identity:
|
||||||
|
UPDATE observations_with_full_replica_identity set observation_time='03/11/2019 02:00:00'::TIMESTAMP where tenant_id = 'tenant_id' and measurement_id = 3;
|
||||||
|
|
||||||
|
step s2-end:
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
step s2-print-cluster-2:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_full_replica_identity', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_full_replica_identity
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57636|1500019|t | 3
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 3|{"name": 29.3}|Mon Mar 11 02:00:00 2019 PDT
|
||||||
|
(3 rows)
|
||||||
|
|
||||||
|
step s3-release-advisory-lock:
|
||||||
|
SELECT pg_advisory_unlock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_unlock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations-2-concurrently: <... completed>
|
||||||
|
create_distributed_table_concurrently
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-print-cluster-2:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_full_replica_identity', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_full_replica_identity
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57637|1500021|t | 3
|
||||||
|
57637|1500023|t | 0
|
||||||
|
57638|1500020|t | 0
|
||||||
|
57638|1500022|t | 0
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 3|{"name": 29.3}|Mon Mar 11 02:00:00 2019 PDT
|
||||||
|
(3 rows)
|
||||||
|
|
||||||
|
|
||||||
|
starting permutation: s2-print-cluster-2 s3-acquire-advisory-lock s2-begin s1-alter-table s1-set-factor-1 s1-create-distributed-table-observations-2-concurrently s2-insert-observations_with_full_replica_identity s2-update-observations_with_full_replica_identity s2-delete-observations_with_full_replica_identity s2-end s2-print-cluster-2 s3-release-advisory-lock s2-print-cluster-2
|
||||||
|
step s2-print-cluster-2:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_full_replica_identity', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_full_replica_identity
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
tenant_id|dummy|measurement_id|payload|observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
step s3-acquire-advisory-lock:
|
||||||
|
SELECT pg_advisory_lock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_lock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-begin:
|
||||||
|
BEGIN;
|
||||||
|
|
||||||
|
step s1-alter-table:
|
||||||
|
ALTER TABLE observations_with_pk DROP COLUMN dummy;
|
||||||
|
ALTER TABLE observations_with_full_replica_identity DROP COLUMN dummy;
|
||||||
|
|
||||||
|
step s1-set-factor-1:
|
||||||
|
SET citus.shard_replication_factor TO 1;
|
||||||
|
SELECT citus_set_coordinator_host('localhost');
|
||||||
|
|
||||||
|
citus_set_coordinator_host
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations-2-concurrently:
|
||||||
|
SELECT create_distributed_table_concurrently('observations_with_full_replica_identity','tenant_id');
|
||||||
|
<waiting ...>
|
||||||
|
step s2-insert-observations_with_full_replica_identity:
|
||||||
|
INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
|
||||||
|
step s2-update-observations_with_full_replica_identity:
|
||||||
|
UPDATE observations_with_full_replica_identity set observation_time='03/11/2019 02:00:00'::TIMESTAMP where tenant_id = 'tenant_id' and measurement_id = 3;
|
||||||
|
|
||||||
|
step s2-delete-observations_with_full_replica_identity:
|
||||||
|
DELETE FROM observations_with_full_replica_identity where tenant_id = 'tenant_id' and measurement_id = 3 ;
|
||||||
|
|
||||||
|
step s2-end:
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
step s2-print-cluster-2:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_full_replica_identity', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_full_replica_identity
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57636|1500024|t | 2
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
step s3-release-advisory-lock:
|
||||||
|
SELECT pg_advisory_unlock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_unlock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-create-distributed-table-observations-2-concurrently: <... completed>
|
||||||
|
create_distributed_table_concurrently
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s2-print-cluster-2:
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_full_replica_identity', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_full_replica_identity
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
|
||||||
|
nodeport|shardid|success|result
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
57637|1500026|t | 2
|
||||||
|
57637|1500028|t | 0
|
||||||
|
57638|1500025|t | 0
|
||||||
|
57638|1500027|t | 0
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
tenant_id|measurement_id|payload |observation_time
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
tenant_id| 1|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
tenant_id| 2|{"name": 29.3}|Sun Mar 11 03:00:00 2018 PDT
|
||||||
|
(2 rows)
|
||||||
|
|
|
@ -0,0 +1,170 @@
|
||||||
|
Parsed test spec with 3 sessions
|
||||||
|
|
||||||
|
starting permutation: s1-table-owner-new_user s1-table-enable-rls s1-get-shard-distribution s1-user-spec s3-acquire-advisory-lock s1-begin s1-set-role s1-move-placement s2-insert s3-release-advisory-lock s1-reset-role s1-end s1-select s1-get-shard-distribution
|
||||||
|
step s1-table-owner-new_user:
|
||||||
|
ALTER TABLE dist OWNER TO new_user;
|
||||||
|
|
||||||
|
step s1-table-enable-rls:
|
||||||
|
ALTER TABLE dist ENABLE ROW LEVEL SECURITY;
|
||||||
|
|
||||||
|
step s1-get-shard-distribution:
|
||||||
|
SELECT shardid, nodeport FROM pg_dist_placement INNER JOIN pg_dist_node ON (pg_dist_placement.groupid = pg_dist_node.groupid) WHERE shardstate != 4 AND shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport;
|
||||||
|
|
||||||
|
shardid|nodeport
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1234003| 57638
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-user-spec:
|
||||||
|
SELECT rolname, rolsuper, rolbypassrls FROM pg_authid WHERE rolname = 'new_user';
|
||||||
|
|
||||||
|
rolname |rolsuper|rolbypassrls
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
new_user|f |f
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s3-acquire-advisory-lock:
|
||||||
|
SELECT pg_advisory_lock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_lock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-begin:
|
||||||
|
BEGIN;
|
||||||
|
|
||||||
|
step s1-set-role:
|
||||||
|
SET ROLE new_user;
|
||||||
|
|
||||||
|
step s1-move-placement:
|
||||||
|
SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637);
|
||||||
|
<waiting ...>
|
||||||
|
step s2-insert:
|
||||||
|
INSERT INTO dist VALUES (23, 23);
|
||||||
|
|
||||||
|
step s3-release-advisory-lock:
|
||||||
|
SELECT pg_advisory_unlock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_unlock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-move-placement: <... completed>
|
||||||
|
citus_move_shard_placement
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-reset-role:
|
||||||
|
RESET ROLE;
|
||||||
|
|
||||||
|
step s1-end:
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
step s1-select:
|
||||||
|
SELECT * FROM dist ORDER BY column1;
|
||||||
|
|
||||||
|
column1|column2
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
23| 23
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-get-shard-distribution:
|
||||||
|
SELECT shardid, nodeport FROM pg_dist_placement INNER JOIN pg_dist_node ON (pg_dist_placement.groupid = pg_dist_node.groupid) WHERE shardstate != 4 AND shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport;
|
||||||
|
|
||||||
|
shardid|nodeport
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1234003| 57637
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
|
||||||
|
starting permutation: s1-no-connection-cache s2-no-connection-cache s3-no-connection-cache s1-table-owner-new_user s1-table-force-rls s1-get-shard-distribution s1-user-spec s3-acquire-advisory-lock s1-begin s1-set-role s1-move-placement s2-insert s3-release-advisory-lock s1-reset-role s1-end s1-select s1-get-shard-distribution
|
||||||
|
step s1-no-connection-cache:
|
||||||
|
SET citus.max_cached_conns_per_worker to 0;
|
||||||
|
|
||||||
|
step s2-no-connection-cache:
|
||||||
|
SET citus.max_cached_conns_per_worker to 0;
|
||||||
|
|
||||||
|
step s3-no-connection-cache:
|
||||||
|
SET citus.max_cached_conns_per_worker to 0;
|
||||||
|
|
||||||
|
step s1-table-owner-new_user:
|
||||||
|
ALTER TABLE dist OWNER TO new_user;
|
||||||
|
|
||||||
|
step s1-table-force-rls:
|
||||||
|
ALTER TABLE dist FORCE ROW LEVEL SECURITY;
|
||||||
|
|
||||||
|
step s1-get-shard-distribution:
|
||||||
|
SELECT shardid, nodeport FROM pg_dist_placement INNER JOIN pg_dist_node ON (pg_dist_placement.groupid = pg_dist_node.groupid) WHERE shardstate != 4 AND shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport;
|
||||||
|
|
||||||
|
shardid|nodeport
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1234003| 57638
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-user-spec:
|
||||||
|
SELECT rolname, rolsuper, rolbypassrls FROM pg_authid WHERE rolname = 'new_user';
|
||||||
|
|
||||||
|
rolname |rolsuper|rolbypassrls
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
new_user|f |f
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s3-acquire-advisory-lock:
|
||||||
|
SELECT pg_advisory_lock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_lock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-begin:
|
||||||
|
BEGIN;
|
||||||
|
|
||||||
|
step s1-set-role:
|
||||||
|
SET ROLE new_user;
|
||||||
|
|
||||||
|
step s1-move-placement:
|
||||||
|
SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637);
|
||||||
|
<waiting ...>
|
||||||
|
step s2-insert:
|
||||||
|
INSERT INTO dist VALUES (23, 23);
|
||||||
|
|
||||||
|
step s3-release-advisory-lock:
|
||||||
|
SELECT pg_advisory_unlock(44000, 55152);
|
||||||
|
|
||||||
|
pg_advisory_unlock
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-move-placement: <... completed>
|
||||||
|
citus_move_shard_placement
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-reset-role:
|
||||||
|
RESET ROLE;
|
||||||
|
|
||||||
|
step s1-end:
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
step s1-select:
|
||||||
|
SELECT * FROM dist ORDER BY column1;
|
||||||
|
|
||||||
|
column1|column2
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
23| 23
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
step s1-get-shard-distribution:
|
||||||
|
SELECT shardid, nodeport FROM pg_dist_placement INNER JOIN pg_dist_node ON (pg_dist_placement.groupid = pg_dist_node.groupid) WHERE shardstate != 4 AND shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport;
|
||||||
|
|
||||||
|
shardid|nodeport
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1234003| 57637
|
||||||
|
(1 row)
|
||||||
|
|
|
@@ -91,7 +91,7 @@ step s1-drop-marked-shards:
 <waiting ...>
s1: WARNING: canceling statement due to lock timeout
step s1-drop-marked-shards: <... completed>
s1: WARNING: Failed to drop 1 orphaned shards out of 1
s1: WARNING: failed to clean up 1 orphaned shards out of 1
step s1-commit:
 COMMIT;

@@ -740,7 +740,7 @@ DETAIL: from localhost:xxxxx
(1 row)

CALL citus_cleanup_orphaned_shards();
LOG: cleaning up public.test_with_pkey_13000042 on localhost:xxxxx which was left after a move
LOG: deferred drop of orphaned shard public.test_with_pkey_13000042 on localhost:xxxxx after a move completed
NOTICE: cleaned up 1 orphaned shards
SET client_min_messages TO DEFAULT;
-- we don't support multiple shard moves in a single transaction
@@ -636,21 +636,6 @@ Aggregate
 -> Seq Scan on events_1400285 events
    Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[]))
-- Union and left join subquery pushdown
-- enable_group_by_reordering is a new GUC introduced in PG15
-- it does some optimization of the order of group by keys which results
-- in a different explain output plan between PG13/14 and PG15
-- Hence we set that GUC to off.
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
SET enable_group_by_reordering TO off;
\endif
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM SET enable_group_by_reordering TO off;$$);
1
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
(localhost,57637,t,t)
(localhost,57638,t,t)
EXPLAIN (COSTS OFF)
SELECT
 avg(array_length(events, 1)) AS event_average,
@@ -873,14 +858,6 @@ Sort
 Sort Key: events_2.composite_id
 -> Seq Scan on events_1400285 events_2
    Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
\if :server_version_ge_15
RESET enable_group_by_reordering;
\endif
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM RESET enable_group_by_reordering;$$);
1
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
(localhost,57637,t,t)
(localhost,57638,t,t)
-- Lateral join subquery pushdown
-- set subquery_pushdown due to limit in the query
SET citus.subquery_pushdown to ON;
@@ -909,6 +909,24 @@ SELECT * FROM multi_extension.print_extension_changes();
 | function worker_fix_partition_shard_index_names(regclass,text,text) void
(4 rows)

-- There was a bug when downgrading to 10.2-2 from 10.2-4
-- Test that we do not have any issues with this particular downgrade
ALTER EXTENSION citus UPDATE TO '10.2-2';
ALTER EXTENSION citus UPDATE TO '10.2-4';
SELECT * FROM multi_extension.print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Test downgrade to 10.2-4 from 10.2-5
ALTER EXTENSION citus UPDATE TO '10.2-5';
ALTER EXTENSION citus UPDATE TO '10.2-4';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Snapshot of state at 10.2-5
ALTER EXTENSION citus UPDATE TO '10.2-5';
SELECT * FROM multi_extension.print_extension_changes();
@@ -916,9 +934,6 @@ SELECT * FROM multi_extension.print_extension_changes();
---------------------------------------------------------------------
(0 rows)

-- Test downgrade to 10.2-4 from 10.2-5
ALTER EXTENSION citus UPDATE TO '10.2-4';
ALTER EXTENSION citus UPDATE TO '10.2-5';
-- Make sure that we defined dependencies from all rel objects (tables,
-- indexes, sequences ..) to columnar table access method ...
SELECT pg_class.oid INTO columnar_schema_members
@@ -1177,7 +1192,7 @@ DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
SHOW citus.version;
 citus.version
---------------------------------------------------------------------
 11.1devel
 11.1.3
(1 row)

-- ensure no unexpected objects were created outside pg_catalog
@@ -1521,6 +1536,66 @@ SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenanc
 1
(1 row)

-- confirm that we can create a distributed table concurrently on an empty node
DROP EXTENSION citus;
CREATE EXTENSION citus;
CREATE TABLE test (x int, y int);
INSERT INTO test VALUES (1,2);
SET citus.shard_replication_factor TO 1;
SET citus.defer_drop_after_shard_split TO off;
SELECT create_distributed_table_concurrently('test','x');
NOTICE: relation test does not have a REPLICA IDENTITY or PRIMARY KEY
DETAIL: UPDATE and DELETE commands on the relation will error out during create_distributed_table_concurrently unless there is a REPLICA IDENTITY or PRIMARY KEY. INSERT commands will still work.
 create_distributed_table_concurrently
---------------------------------------------------------------------

(1 row)

DROP TABLE test;
TRUNCATE pg_dist_node;
-- confirm that we can create a distributed table on an empty node
CREATE TABLE test (x int, y int);
INSERT INTO test VALUES (1,2);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('test','x');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test$$)
 create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE test;
TRUNCATE pg_dist_node;
-- confirm that we can create a reference table on an empty node
CREATE TABLE test (x int, y int);
INSERT INTO test VALUES (1,2);
SELECT create_reference_table('test');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test$$)
 create_reference_table
---------------------------------------------------------------------

(1 row)

DROP TABLE test;
TRUNCATE pg_dist_node;
-- confirm that we can create a local table on an empty node
CREATE TABLE test (x int, y int);
INSERT INTO test VALUES (1,2);
SELECT citus_add_local_table_to_metadata('test');
 citus_add_local_table_to_metadata
---------------------------------------------------------------------

(1 row)

DROP TABLE test;
DROP EXTENSION citus;
CREATE EXTENSION citus;
DROP TABLE version_mismatch_table;
DROP SCHEMA multi_extension;
ERROR: cannot drop schema multi_extension because other objects depend on it
@ -521,9 +521,9 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
tablename | indexname
---------------------------------------------------------------------
date_partitioned_citus_local_table | date_partitioned_citus_local_table_measureid_idx
date_partitioned_citus_local_table_361369 | date_partitioned_citus_local_table_measureid_idx_361369
date_partitioned_citus_local_table_361377 | date_partitioned_citus_local_table_measureid_idx_361377
partition_local_table | partition_local_table_measureid_idx
partition_local_table_361370 | partition_local_table_measureid_idx_361370
partition_local_table_361378 | partition_local_table_measureid_idx_361378
(4 rows)
||||||
|
|
||||||
-- creating a single object should only need to trigger fixing the single object
|
-- creating a single object should only need to trigger fixing the single object
|
||||||
|
@ -753,7 +753,7 @@ DETAIL: drop cascades to table not_partitioned
|
||||||
drop cascades to table not_distributed
|
drop cascades to table not_distributed
|
||||||
drop cascades to table fk_table
|
drop cascades to table fk_table
|
||||||
drop cascades to table p
|
drop cascades to table p
|
||||||
drop cascades to table date_partitioned_citus_local_table_361369
drop cascades to table date_partitioned_citus_local_table_361377
||||||
drop cascades to table date_partitioned_citus_local_table
|
drop cascades to table date_partitioned_citus_local_table
|
||||||
drop cascades to table parent_table
|
drop cascades to table parent_table
|
||||||
SELECT citus_remove_node('localhost', :master_port);
|
SELECT citus_remove_node('localhost', :master_port);
|
||||||
|
|
|
@ -1172,5 +1172,108 @@ SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||||
|
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
|
CREATE TABLE set_on_default_test_referenced(
|
||||||
|
col_1 int, col_2 int, col_3 int, col_4 int,
|
||||||
|
unique (col_1, col_3)
|
||||||
|
);
|
||||||
|
SELECT create_reference_table('set_on_default_test_referenced');
|
||||||
|
create_reference_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 int, col_2 int, col_3 serial, col_4 int,
|
||||||
|
FOREIGN KEY(col_1, col_3)
|
||||||
|
REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON UPDATE SET DEFAULT
|
||||||
|
);
|
||||||
|
-- from distributed / reference to reference, fkey exists before calling the UDFs
|
||||||
|
SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
|
SELECT create_reference_table('set_on_default_test_referencing');
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
|
DROP TABLE set_on_default_test_referencing;
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 serial, col_2 int, col_3 int, col_4 int
|
||||||
|
);
|
||||||
|
SELECT create_reference_table('set_on_default_test_referencing');
|
||||||
|
create_reference_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- from reference to reference, fkey doesn't exist before calling the UDFs
|
||||||
|
ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
|
||||||
|
FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT;
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
|
DROP TABLE set_on_default_test_referencing;
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 int, col_2 serial, col_3 int, col_4 bigserial
|
||||||
|
);
|
||||||
|
SELECT create_reference_table('set_on_default_test_referencing');
|
||||||
|
create_reference_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- ok since referencing columns are not based on sequences
|
||||||
|
ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
|
||||||
|
FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT;
|
||||||
|
DROP TABLE set_on_default_test_referencing;
|
||||||
|
CREATE SEQUENCE test_sequence;
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 int, col_2 int, col_3 int DEFAULT nextval('test_sequence'), col_4 int
|
||||||
|
);
|
||||||
|
SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- from distributed to reference, fkey doesn't exist before calling the UDFs
|
||||||
|
ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
|
||||||
|
FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT ON UPDATE SET DEFAULT;
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
|
DROP TABLE set_on_default_test_referenced;
|
||||||
|
CREATE TABLE set_on_default_test_referenced(
|
||||||
|
col_1 int, col_2 int, col_3 int, col_4 int,
|
||||||
|
unique (col_1, col_3)
|
||||||
|
);
|
||||||
|
SELECT create_distributed_table('set_on_default_test_referenced', 'col_1');
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
DROP TABLE set_on_default_test_referencing;
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 bigserial, col_2 int, col_3 int DEFAULT nextval('test_sequence'), col_4 int,
|
||||||
|
FOREIGN KEY(col_1, col_3)
|
||||||
|
REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT
|
||||||
|
);
|
||||||
|
-- from distributed to distributed, fkey exists before calling the UDFs
|
||||||
|
SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
|
DROP TABLE set_on_default_test_referencing;
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 int DEFAULT nextval('test_sequence'), col_2 int, col_3 int, col_4 int
|
||||||
|
);
|
||||||
|
SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- from distributed to distributed, fkey doesn't exist before calling the UDFs
|
||||||
|
ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
|
||||||
|
FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT;
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
-- we no longer need those tables
DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2;
DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2,
  set_on_default_test_referenced, set_on_default_test_referencing;
|
|
|
@ -4324,12 +4324,66 @@ WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%
|
||||||
(6 rows)
|
(6 rows)
|
||||||
|
|
||||||
\c - - - :master_port
|
\c - - - :master_port
|
||||||
|
SET search_path TO partitioning_schema;
|
||||||
|
-- create parent table
|
||||||
|
CREATE TABLE stxdinp(i int, a int, b int) PARTITION BY RANGE (i);
|
||||||
|
-- create partition
|
||||||
|
CREATE TABLE stxdinp1 PARTITION OF stxdinp FOR VALUES FROM (1) TO (100);
|
||||||
|
-- populate table
|
||||||
|
INSERT INTO stxdinp SELECT 1, a/100, a/100 FROM generate_series(1, 999) a;
|
||||||
|
-- create extended statistics
|
||||||
|
CREATE STATISTICS stxdinp ON a, b FROM stxdinp;
|
||||||
|
-- distribute parent table
|
||||||
|
SELECT create_distributed_table('stxdinp', 'i');
|
||||||
|
NOTICE: Copying data from local table...
|
||||||
|
NOTICE: copying the data has completed
|
||||||
|
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||||
|
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$partitioning_schema.stxdinp1$$)
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- run select query, works fine
|
||||||
|
SELECT a, b FROM stxdinp GROUP BY 1, 2;
|
||||||
|
a | b
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1 | 1
|
||||||
|
3 | 3
|
||||||
|
7 | 7
|
||||||
|
2 | 2
|
||||||
|
8 | 8
|
||||||
|
0 | 0
|
||||||
|
5 | 5
|
||||||
|
6 | 6
|
||||||
|
9 | 9
|
||||||
|
4 | 4
|
||||||
|
(10 rows)
|
||||||
|
|
||||||
|
-- partitions are processed recursively for PG15+
|
||||||
|
VACUUM ANALYZE stxdinp;
|
||||||
|
SELECT a, b FROM stxdinp GROUP BY 1, 2;
|
||||||
|
a | b
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1 | 1
|
||||||
|
3 | 3
|
||||||
|
7 | 7
|
||||||
|
2 | 2
|
||||||
|
8 | 8
|
||||||
|
0 | 0
|
||||||
|
5 | 5
|
||||||
|
6 | 6
|
||||||
|
9 | 9
|
||||||
|
4 | 4
|
||||||
|
(10 rows)
|
||||||
|
|
||||||
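An illustrative sketch, not part of the expected output: the extended statistics objects created for the parent table and its partitions can be listed from the standard pg_statistic_ext catalog (columns stxrelid and stxname).
SELECT stxrelid::regclass AS rel, stxname
FROM pg_statistic_ext
WHERE stxname LIKE 'stxdinp%'
ORDER BY stxname;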
DROP SCHEMA partitioning_schema CASCADE;
NOTICE: drop cascades to 4 other objects
NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table partitioning_schema."schema-test"
DETAIL: drop cascades to table "schema-test"
drop cascades to table partitioning_schema.another_distributed_table
drop cascades to table another_distributed_table
drop cascades to table partitioning_schema.distributed_parent_table
drop cascades to table distributed_parent_table
drop cascades to table partitioning_schema.part_table_with_very_long_name
drop cascades to table part_table_with_very_long_name
drop cascades to table stxdinp
RESET search_path;
DROP TABLE IF EXISTS
partitioning_hash_test,
|
|
|
@ -3,13 +3,6 @@
--
-- Tests select distinct, and select distinct on features.
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
t
(1 row)

ANALYZE lineitem_hash_part;
-- function calls are supported
SELECT DISTINCT l_orderkey, now() FROM lineitem_hash_part LIMIT 0;
|
@ -446,10 +439,9 @@ EXPLAIN (COSTS FALSE)
|
||||||
QUERY PLAN
|
QUERY PLAN
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
Limit
|
Limit
|
||||||
|
-> Unique
|
||||||
-> Sort
|
-> Sort
|
||||||
Sort Key: remote_scan.l_suppkey, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1)))
|
Sort Key: remote_scan.l_suppkey, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1)))
|
||||||
-> HashAggregate
|
|
||||||
Group Key: remote_scan.l_suppkey, (pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))
|
|
||||||
-> HashAggregate
|
-> HashAggregate
|
||||||
Group Key: remote_scan.l_suppkey, remote_scan.worker_column_4
|
Group Key: remote_scan.l_suppkey, remote_scan.worker_column_4
|
||||||
-> Custom Scan (Citus Adaptive)
|
-> Custom Scan (Citus Adaptive)
|
||||||
|
@ -460,7 +452,7 @@ EXPLAIN (COSTS FALSE)
|
||||||
-> HashAggregate
|
-> HashAggregate
|
||||||
Group Key: l_suppkey, l_linenumber
|
Group Key: l_suppkey, l_linenumber
|
||||||
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
||||||
(15 rows)
|
(14 rows)
|
||||||
|
|
||||||
-- check the plan if the hash aggreate is disabled. This explain errors out due
|
-- check the plan if the hash aggreate is disabled. This explain errors out due
|
||||||
-- to a bug right now, expectation must be corrected after fixing it.
|
-- to a bug right now, expectation must be corrected after fixing it.
|
||||||
|
@ -598,10 +590,9 @@ EXPLAIN (COSTS FALSE)
|
||||||
QUERY PLAN
|
QUERY PLAN
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
Limit
|
Limit
|
||||||
|
-> Unique
|
||||||
-> Sort
|
-> Sort
|
||||||
Sort Key: ((sum(remote_scan.avg) / (pg_catalog.sum(remote_scan.avg_1))::double precision))
|
Sort Key: ((sum(remote_scan.avg) / (pg_catalog.sum(remote_scan.avg_1))::double precision))
|
||||||
-> HashAggregate
|
|
||||||
Group Key: (sum(remote_scan.avg) / (pg_catalog.sum(remote_scan.avg_1))::double precision)
|
|
||||||
-> HashAggregate
|
-> HashAggregate
|
||||||
Group Key: remote_scan.worker_column_3, remote_scan.worker_column_4
|
Group Key: remote_scan.worker_column_3, remote_scan.worker_column_4
|
||||||
-> Custom Scan (Citus Adaptive)
|
-> Custom Scan (Citus Adaptive)
|
||||||
|
@ -612,7 +603,7 @@ EXPLAIN (COSTS FALSE)
|
||||||
-> HashAggregate
|
-> HashAggregate
|
||||||
Group Key: l_suppkey, l_linenumber
|
Group Key: l_suppkey, l_linenumber
|
||||||
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
||||||
(15 rows)
|
(14 rows)
|
||||||
|
|
||||||
-- check the plan if the hash aggreate is disabled. This explain errors out due
|
-- check the plan if the hash aggreate is disabled. This explain errors out due
|
||||||
-- to a bug right now, expectation must be corrected after fixing it.
|
-- to a bug right now, expectation must be corrected after fixing it.
|
||||||
|
@ -674,10 +665,9 @@ EXPLAIN (COSTS FALSE)
|
||||||
QUERY PLAN
|
QUERY PLAN
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
Limit
|
Limit
|
||||||
|
-> Unique
|
||||||
-> Sort
|
-> Sort
|
||||||
Sort Key: (((pg_catalog.sum(remote_scan.dis))::bigint + COALESCE((pg_catalog.sum(remote_scan.dis_1))::bigint, '0'::bigint)))
|
Sort Key: (((pg_catalog.sum(remote_scan.dis))::bigint + COALESCE((pg_catalog.sum(remote_scan.dis_1))::bigint, '0'::bigint)))
|
||||||
-> HashAggregate
|
|
||||||
Group Key: ((pg_catalog.sum(remote_scan.dis))::bigint + COALESCE((pg_catalog.sum(remote_scan.dis_1))::bigint, '0'::bigint))
|
|
||||||
-> HashAggregate
|
-> HashAggregate
|
||||||
Group Key: remote_scan.worker_column_3, remote_scan.worker_column_4
|
Group Key: remote_scan.worker_column_3, remote_scan.worker_column_4
|
||||||
-> Custom Scan (Citus Adaptive)
|
-> Custom Scan (Citus Adaptive)
|
||||||
|
@ -688,7 +678,7 @@ EXPLAIN (COSTS FALSE)
|
||||||
-> HashAggregate
|
-> HashAggregate
|
||||||
Group Key: l_suppkey, l_linenumber
|
Group Key: l_suppkey, l_linenumber
|
||||||
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
||||||
(15 rows)
|
(14 rows)
|
||||||
|
|
||||||
-- check the plan if the hash aggreate is disabled. This explain errors out due
|
-- check the plan if the hash aggreate is disabled. This explain errors out due
|
||||||
-- to a bug right now, expectation must be corrected after fixing it.
|
-- to a bug right now, expectation must be corrected after fixing it.
|
||||||
|
|
File diff suppressed because it is too large
|
@ -758,7 +758,7 @@ SET search_path to "Tenant Isolation";
\set VERBOSITY terse
SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'block_writes');
WARNING: command DROP TABLE is disabled
WARNING: Failed to cleanup 1 shards out of 1
WARNING: failed to clean up 1 orphaned shards out of 1 after a isolate_tenant_to_new_shard operation failed
ERROR: command CREATE TABLE is disabled
\set VERBOSITY default
|
\set VERBOSITY default
|
||||||
\c - postgres - :worker_1_port
|
\c - postgres - :worker_1_port
|
||||||
|
@ -811,7 +811,7 @@ WARNING: command DROP TABLE is disabled
|
||||||
WARNING: command DROP TABLE is disabled
|
WARNING: command DROP TABLE is disabled
|
||||||
WARNING: command DROP TABLE is disabled
|
WARNING: command DROP TABLE is disabled
|
||||||
WARNING: command DROP TABLE is disabled
|
WARNING: command DROP TABLE is disabled
|
||||||
WARNING: Failed to cleanup 6 shards out of 6
WARNING: failed to clean up 6 orphaned shards out of 6 after a isolate_tenant_to_new_shard operation failed
ERROR: command DROP TABLE is disabled
|
ERROR: command DROP TABLE is disabled
|
||||||
\set VERBOSITY default
|
\set VERBOSITY default
|
||||||
-- check if metadata is changed
|
-- check if metadata is changed
|
||||||
|
|
|
@ -790,7 +790,7 @@ SET search_path to "Tenant Isolation";
|
||||||
\set VERBOSITY terse
|
\set VERBOSITY terse
|
||||||
SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'force_logical');
|
SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'force_logical');
|
||||||
WARNING: command DROP TABLE is disabled
|
WARNING: command DROP TABLE is disabled
|
||||||
WARNING: Failed to cleanup 1 shards out of 1
WARNING: failed to clean up 1 orphaned shards out of 1 after a isolate_tenant_to_new_shard operation failed
ERROR: command CREATE TABLE is disabled
|
ERROR: command CREATE TABLE is disabled
|
||||||
\set VERBOSITY default
|
\set VERBOSITY default
|
||||||
\c - postgres - :worker_1_port
|
\c - postgres - :worker_1_port
|
||||||
|
|
|
@ -171,7 +171,7 @@ SELECT * FROM sale_triggers ORDER BY 1, 2;
|
||||||
-- test that we can't rename a distributed clone trigger
|
-- test that we can't rename a distributed clone trigger
|
||||||
ALTER TRIGGER "new_record_sale_trigger" ON "pg15"."sale_newyork" RENAME TO "another_trigger_name";
|
ALTER TRIGGER "new_record_sale_trigger" ON "pg15"."sale_newyork" RENAME TO "another_trigger_name";
|
||||||
ERROR: cannot rename trigger "new_record_sale_trigger" on table "sale_newyork"
|
ERROR: cannot rename trigger "new_record_sale_trigger" on table "sale_newyork"
|
||||||
HINT: Rename trigger on partitioned table "sale" instead.
HINT: Rename the trigger on the partitioned table "sale" instead.
--
|
--
|
||||||
-- In PG15, For GENERATED columns, all dependencies of the generation
|
-- In PG15, For GENERATED columns, all dependencies of the generation
|
||||||
-- expression are recorded as NORMAL dependencies of the column itself.
|
-- expression are recorded as NORMAL dependencies of the column itself.
|
||||||
|
@ -349,6 +349,58 @@ NOTICE: renaming the new table to pg15.tbl2
|
||||||
|
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
|
-- Make sure that we allow foreign key columns on local tables added to
|
||||||
|
-- metadata to have SET NULL/DEFAULT on column basis.
|
||||||
|
CREATE TABLE PKTABLE_local (tid int, id int, PRIMARY KEY (tid, id));
|
||||||
|
CREATE TABLE FKTABLE_local (
|
||||||
|
tid int, id int,
|
||||||
|
fk_id_del_set_null int,
|
||||||
|
fk_id_del_set_default int DEFAULT 0,
|
||||||
|
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES PKTABLE_local ON DELETE SET NULL (fk_id_del_set_null),
|
||||||
|
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES PKTABLE_local ON DELETE SET DEFAULT (fk_id_del_set_default)
|
||||||
|
);
|
||||||
|
SELECT citus_add_local_table_to_metadata('FKTABLE_local', cascade_via_foreign_keys=>true);
|
||||||
|
citus_add_local_table_to_metadata
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- show that the definition is expected
|
||||||
|
SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'FKTABLE_local'::regclass::oid ORDER BY oid;
|
||||||
|
pg_get_constraintdef
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable_local(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
|
||||||
|
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable_local(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
\c - - - :worker_1_port
|
||||||
|
SET search_path TO pg15;
|
||||||
|
-- show that the definition is expected on the worker as well
|
||||||
|
SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'FKTABLE_local'::regclass::oid ORDER BY oid;
|
||||||
|
pg_get_constraintdef
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable_local(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
|
||||||
|
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable_local(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
-- also, make sure that it works as expected
|
||||||
|
INSERT INTO PKTABLE_local VALUES (1, 0), (1, 1), (1, 2);
|
||||||
|
INSERT INTO FKTABLE_local VALUES
|
||||||
|
(1, 1, 1, NULL),
|
||||||
|
(1, 2, NULL, 2);
|
||||||
|
DELETE FROM PKTABLE_local WHERE id = 1 OR id = 2;
|
||||||
|
SELECT * FROM FKTABLE_local ORDER BY id;
|
||||||
|
tid | id | fk_id_del_set_null | fk_id_del_set_default
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1 | 1 | |
|
||||||
|
1 | 2 | | 0
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
\c - - - :master_port
|
||||||
|
SET search_path TO pg15;
|
||||||
|
SET client_min_messages to ERROR;
|
||||||
|
DROP TABLE FKTABLE_local, PKTABLE_local;
|
||||||
|
RESET client_min_messages;
|
||||||
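A minimal PG15 sketch, illustrative only and not part of the expected output: the column-list form of ON DELETE SET NULL restricts which referencing columns are nulled when the referenced row goes away (pk_demo and fk_demo are hypothetical names).
CREATE TABLE pk_demo (tid int, id int, PRIMARY KEY (tid, id));
CREATE TABLE fk_demo (
    tid int, fk_id int,
    -- only fk_id is nulled; tid keeps its value when the referenced row is deleted
    FOREIGN KEY (tid, fk_id) REFERENCES pk_demo ON DELETE SET NULL (fk_id)
);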
SELECT 1 FROM citus_remove_node('localhost', :master_port);
|
SELECT 1 FROM citus_remove_node('localhost', :master_port);
|
||||||
?column?
|
?column?
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
|
@ -878,8 +930,453 @@ WARNING: not propagating CLUSTER command for partitioned table to worker nodes
|
||||||
HINT: Provide a child partition table names in order to CLUSTER distributed partitioned tables.
|
HINT: Provide a child partition table names in order to CLUSTER distributed partitioned tables.
|
||||||
-- verify that we can still cluster the partition tables now since replication factor is 1
|
-- verify that we can still cluster the partition tables now since replication factor is 1
|
||||||
CLUSTER sale_newyork_repl_factor_1 USING sale_newyork_repl_factor_1_pkey;
|
CLUSTER sale_newyork_repl_factor_1 USING sale_newyork_repl_factor_1_pkey;
|
||||||
|
create table reservations ( room_id integer not null, booked_during daterange );
|
||||||
|
insert into reservations values
|
||||||
|
-- 1: has a meets and a gap
|
||||||
|
(1, daterange('2018-07-01', '2018-07-07')),
|
||||||
|
(1, daterange('2018-07-07', '2018-07-14')),
|
||||||
|
(1, daterange('2018-07-20', '2018-07-22')),
|
||||||
|
-- 2: just a single row
|
||||||
|
(2, daterange('2018-07-01', '2018-07-03')),
|
||||||
|
-- 3: one null range
|
||||||
|
(3, NULL),
|
||||||
|
-- 4: two null ranges
|
||||||
|
(4, NULL),
|
||||||
|
(4, NULL),
|
||||||
|
-- 5: a null range and a non-null range
|
||||||
|
(5, NULL),
|
||||||
|
(5, daterange('2018-07-01', '2018-07-03')),
|
||||||
|
-- 6: has overlap
|
||||||
|
(6, daterange('2018-07-01', '2018-07-07')),
|
||||||
|
(6, daterange('2018-07-05', '2018-07-10')),
|
||||||
|
-- 7: two ranges that meet: no gap or overlap
|
||||||
|
(7, daterange('2018-07-01', '2018-07-07')),
|
||||||
|
(7, daterange('2018-07-07', '2018-07-14')),
|
||||||
|
-- 8: an empty range
|
||||||
|
(8, 'empty'::daterange);
|
||||||
|
SELECT create_distributed_table('reservations', 'room_id');
|
||||||
|
NOTICE: Copying data from local table...
|
||||||
|
NOTICE: copying the data has completed
|
||||||
|
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||||
|
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg15.reservations$$)
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- should be fine to pushdown range_agg
|
||||||
|
SELECT room_id, range_agg(booked_during ORDER BY booked_during)
|
||||||
|
FROM reservations
|
||||||
|
GROUP BY room_id
|
||||||
|
ORDER BY room_id;
|
||||||
|
room_id | range_agg
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1 | {[07-01-2018,07-14-2018),[07-20-2018,07-22-2018)}
|
||||||
|
2 | {[07-01-2018,07-03-2018)}
|
||||||
|
3 |
|
||||||
|
4 |
|
||||||
|
5 | {[07-01-2018,07-03-2018)}
|
||||||
|
6 | {[07-01-2018,07-10-2018)}
|
||||||
|
7 | {[07-01-2018,07-14-2018)}
|
||||||
|
8 | {}
|
||||||
|
(8 rows)
|
||||||
|
|
||||||
|
-- should be fine to apply range_agg on the coordinator
|
||||||
|
SELECT room_id + 1, range_agg(booked_during ORDER BY booked_during)
|
||||||
|
FROM reservations
|
||||||
|
GROUP BY room_id + 1
|
||||||
|
ORDER BY room_id + 1;
|
||||||
|
?column? | range_agg
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
2 | {[07-01-2018,07-14-2018),[07-20-2018,07-22-2018)}
|
||||||
|
3 | {[07-01-2018,07-03-2018)}
|
||||||
|
4 |
|
||||||
|
5 |
|
||||||
|
6 | {[07-01-2018,07-03-2018)}
|
||||||
|
7 | {[07-01-2018,07-10-2018)}
|
||||||
|
8 | {[07-01-2018,07-14-2018)}
|
||||||
|
9 | {}
|
||||||
|
(8 rows)
|
||||||
|
|
||||||
|
-- min() and max() for xid8
|
||||||
|
create table xid8_t1 (x xid8, y int);
|
||||||
|
insert into xid8_t1 values ('0', 1), ('010', 2), ('42', 3), ('0xffffffffffffffff', 4), ('-1', 5);
|
||||||
|
SELECT create_distributed_table('xid8_t1', 'x');
|
||||||
|
NOTICE: Copying data from local table...
|
||||||
|
NOTICE: copying the data has completed
|
||||||
|
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||||
|
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg15.xid8_t1$$)
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
select min(x), max(x) from xid8_t1 ORDER BY 1,2;
|
||||||
|
min | max
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
0 | 18446744073709551615
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
select min(x), max(x) from xid8_t1 GROUP BY x ORDER BY 1,2;
|
||||||
|
min | max
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
0 | 0
|
||||||
|
8 | 8
|
||||||
|
42 | 42
|
||||||
|
18446744073709551615 | 18446744073709551615
|
||||||
|
(4 rows)
|
||||||
|
|
||||||
|
select min(x), max(x) from xid8_t1 GROUP BY y ORDER BY 1,2;
|
||||||
|
min | max
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
0 | 0
|
||||||
|
8 | 8
|
||||||
|
42 | 42
|
||||||
|
18446744073709551615 | 18446744073709551615
|
||||||
|
18446744073709551615 | 18446744073709551615
|
||||||
|
(5 rows)
|
||||||
|
|
||||||
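An illustrative sketch, not part of the expected output: because PG15 adds min() and max() for xid8, the same aggregates work with an ordinary filter, computed per shard and merged on the coordinator.
SELECT min(x), max(x) FROM xid8_t1 WHERE y > 1;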
|
--
|
||||||
|
-- PG15 introduces security invoker views
|
||||||
|
-- Citus supports these views because permissions in the shards
|
||||||
|
-- are already checked for the view invoker
|
||||||
|
--
|
||||||
|
-- create a distributed table and populate it
|
||||||
|
CREATE TABLE events (tenant_id int, event_id int, descr text);
|
||||||
|
SELECT create_distributed_table('events','tenant_id');
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
INSERT INTO events VALUES (1, 1, 'push');
|
||||||
|
INSERT INTO events VALUES (2, 2, 'push');
|
||||||
|
-- create a security invoker view with underlying distributed table
|
||||||
|
-- the view will be distributed with security_invoker option as well
|
||||||
|
CREATE VIEW sec_invoker_view WITH (security_invoker=true) AS SELECT * FROM events;
|
||||||
|
\c - - - :worker_1_port
|
||||||
|
SELECT relname, reloptions FROM pg_class
|
||||||
|
WHERE relname = 'sec_invoker_view' AND relnamespace = 'pg15'::regnamespace;
|
||||||
|
relname | reloptions
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
sec_invoker_view | {security_invoker=true}
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
\c - - - :master_port
|
||||||
|
SET search_path TO pg15;
|
||||||
|
-- test altering the security_invoker flag
|
||||||
|
ALTER VIEW sec_invoker_view SET (security_invoker = false);
|
||||||
|
\c - - - :worker_1_port
|
||||||
|
SELECT relname, reloptions FROM pg_class
|
||||||
|
WHERE relname = 'sec_invoker_view' AND relnamespace = 'pg15'::regnamespace;
|
||||||
|
relname | reloptions
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
sec_invoker_view | {security_invoker=false}
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
\c - - - :master_port
|
||||||
|
SET search_path TO pg15;
|
||||||
|
ALTER VIEW sec_invoker_view SET (security_invoker = true);
|
||||||
|
-- create a new user but don't give select permission to events table
|
||||||
|
-- only give select permission to the view
|
||||||
|
CREATE ROLE rls_tenant_1 WITH LOGIN;
|
||||||
|
GRANT USAGE ON SCHEMA pg15 TO rls_tenant_1;
|
||||||
|
GRANT SELECT ON sec_invoker_view TO rls_tenant_1;
|
||||||
|
-- this user shouldn't be able to query the view
|
||||||
|
-- because the view is security invoker
|
||||||
|
-- which means it will check the invoker's rights
|
||||||
|
-- against the view's underlying tables
|
||||||
|
SET ROLE rls_tenant_1;
|
||||||
|
SELECT * FROM sec_invoker_view ORDER BY event_id;
|
||||||
|
ERROR: permission denied for table events
|
||||||
|
RESET ROLE;
|
||||||
|
-- now grant select on the underlying distributed table
|
||||||
|
-- and try again
|
||||||
|
-- now it should work!
|
||||||
|
GRANT SELECT ON TABLE events TO rls_tenant_1;
|
||||||
|
SET ROLE rls_tenant_1;
|
||||||
|
SELECT * FROM sec_invoker_view ORDER BY event_id;
|
||||||
|
tenant_id | event_id | descr
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1 | 1 | push
|
||||||
|
2 | 2 | push
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
RESET ROLE;
|
||||||
|
-- Enable row level security
|
||||||
|
ALTER TABLE events ENABLE ROW LEVEL SECURITY;
|
||||||
|
-- Create policy for tenants to read access their own rows
|
||||||
|
CREATE POLICY user_mod ON events
|
||||||
|
FOR SELECT TO rls_tenant_1
|
||||||
|
USING (current_user = 'rls_tenant_' || tenant_id::text);
|
||||||
|
-- all rows should be visible because we are querying with
|
||||||
|
-- the table owner user now
|
||||||
|
SELECT * FROM sec_invoker_view ORDER BY event_id;
|
||||||
|
tenant_id | event_id | descr
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1 | 1 | push
|
||||||
|
2 | 2 | push
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
-- Switch user that has been granted rights,
|
||||||
|
-- should be able to see rows that the policy allows
|
||||||
|
SET ROLE rls_tenant_1;
|
||||||
|
SELECT * FROM sec_invoker_view ORDER BY event_id;
|
||||||
|
tenant_id | event_id | descr
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1 | 1 | push
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
RESET ROLE;
|
||||||
|
-- ordinary view on top of security invoker view permissions
|
||||||
|
-- ordinary means security definer view
|
||||||
|
-- The PG expected behavior is that this doesn't change anything!!!
|
||||||
|
-- Can't escape security invoker views by defining a security definer view on top of it!
|
||||||
|
CREATE VIEW sec_definer_view AS SELECT * FROM sec_invoker_view ORDER BY event_id;
|
||||||
|
\c - - - :worker_1_port
|
||||||
|
SELECT relname, reloptions FROM pg_class
|
||||||
|
WHERE relname = 'sec_definer_view' AND relnamespace = 'pg15'::regnamespace;
|
||||||
|
relname | reloptions
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
sec_definer_view |
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
\c - - - :master_port
|
||||||
|
SET search_path TO pg15;
|
||||||
|
CREATE ROLE rls_tenant_2 WITH LOGIN;
|
||||||
|
GRANT USAGE ON SCHEMA pg15 TO rls_tenant_2;
|
||||||
|
GRANT SELECT ON sec_definer_view TO rls_tenant_2;
|
||||||
|
-- it doesn't matter that the parent view is security definer
|
||||||
|
-- still the security invoker view will check the invoker's permissions
|
||||||
|
-- and will not allow rls_tenant_2 to query the view
|
||||||
|
SET ROLE rls_tenant_2;
|
||||||
|
SELECT * FROM sec_definer_view ORDER BY event_id;
|
||||||
|
ERROR: permission denied for table events
|
||||||
|
RESET ROLE;
|
||||||
|
-- grant select rights to rls_tenant_2
|
||||||
|
GRANT SELECT ON TABLE events TO rls_tenant_2;
|
||||||
|
-- we still have row level security so rls_tenant_2
|
||||||
|
-- will be able to query but won't be able to see anything
|
||||||
|
SET ROLE rls_tenant_2;
|
||||||
|
SELECT * FROM sec_definer_view ORDER BY event_id;
|
||||||
|
tenant_id | event_id | descr
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
RESET ROLE;
|
||||||
|
-- give some rights to rls_tenant_2
|
||||||
|
CREATE POLICY user_mod_1 ON events
|
||||||
|
FOR SELECT TO rls_tenant_2
|
||||||
|
USING (current_user = 'rls_tenant_' || tenant_id::text);
|
||||||
|
-- Row level security will be applied as well! We are safe!
|
||||||
|
SET ROLE rls_tenant_2;
|
||||||
|
SELECT * FROM sec_definer_view ORDER BY event_id;
|
||||||
|
tenant_id | event_id | descr
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
2 | 2 | push
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
RESET ROLE;
|
||||||
|
-- no need to test updatable views because they are currently not
|
||||||
|
-- supported in Citus when the query view contains citus tables
|
||||||
|
UPDATE sec_invoker_view SET event_id = 5;
|
||||||
|
ERROR: cannot modify views when the query contains citus tables
|
||||||
|
--
|
||||||
|
-- Not allow ON DELETE/UPDATE SET DEFAULT actions on columns that
|
||||||
|
-- default to sequences
|
||||||
|
-- Adding a special test here since in PG15 we can
|
||||||
|
-- specify column list for foreign key ON DELETE SET actions
|
||||||
|
-- Relevant PG commit:
|
||||||
|
-- d6f96ed94e73052f99a2e545ed17a8b2fdc1fb8a
|
||||||
|
--
|
||||||
|
CREATE TABLE set_on_default_test_referenced(
|
||||||
|
col_1 int, col_2 int, col_3 int, col_4 int,
|
||||||
|
unique (col_1, col_3)
|
||||||
|
);
|
||||||
|
SELECT create_reference_table('set_on_default_test_referenced');
|
||||||
|
create_reference_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 int, col_2 int, col_3 serial, col_4 int,
|
||||||
|
FOREIGN KEY(col_1, col_3)
|
||||||
|
REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT (col_1)
|
||||||
|
ON UPDATE SET DEFAULT
|
||||||
|
);
|
||||||
|
-- should error since col_3 defaults to a sequence
|
||||||
|
SELECT create_reference_table('set_on_default_test_referencing');
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
|
DROP TABLE set_on_default_test_referencing;
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 int, col_2 int, col_3 serial, col_4 int,
|
||||||
|
FOREIGN KEY(col_1, col_3)
|
||||||
|
REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT (col_1)
|
||||||
|
);
|
||||||
|
-- should not error since this doesn't set any sequence based columns to default
|
||||||
|
SELECT create_reference_table('set_on_default_test_referencing');
|
||||||
|
create_reference_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
INSERT INTO set_on_default_test_referenced (col_1, col_3) VALUES (1, 1);
|
||||||
|
INSERT INTO set_on_default_test_referencing (col_1, col_3) VALUES (1, 1);
|
||||||
|
DELETE FROM set_on_default_test_referenced;
|
||||||
|
SELECT * FROM set_on_default_test_referencing ORDER BY 1,2;
|
||||||
|
col_1 | col_2 | col_3 | col_4
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
| | 1 |
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
DROP TABLE set_on_default_test_referencing;
|
||||||
|
SET client_min_messages to ERROR;
|
||||||
|
SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
|
||||||
|
?column?
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
RESET client_min_messages;
|
||||||
|
-- should error since col_3 defaults to a sequence
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 int, col_2 int, col_3 serial, col_4 int,
|
||||||
|
FOREIGN KEY(col_1, col_3)
|
||||||
|
REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT (col_3)
|
||||||
|
);
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
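An illustrative sketch, not part of the expected output: as in the earlier successful create_reference_table() case, leaving the sequence-backed column out of the PG15 column list keeps the constraint acceptable to Citus (set_on_default_ok is a hypothetical table name).
CREATE TABLE set_on_default_ok(
    col_1 int, col_2 int, col_3 serial, col_4 int,
    -- only col_1 is reset to its default, so the serial column is not affected
    FOREIGN KEY(col_1, col_3)
        REFERENCES set_on_default_test_referenced(col_1, col_3)
        ON DELETE SET DEFAULT (col_1)
);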
|
--
|
||||||
|
-- PG15 has suppressed some casts on constants when querying foreign tables
|
||||||
|
-- For example, we can use text to represent a type that's an enum on the remote side
|
||||||
|
-- A comparison on such a column will get shipped as "var = 'foo'::text"
|
||||||
|
-- But there's no enum = text operator on the remote side
|
||||||
|
-- If we leave off the explicit cast, the comparison will work
|
||||||
|
-- Test we behave in the same way with a Citus foreign table
|
||||||
|
-- Reminder: foreign tables cannot be distributed/reference, can only be Citus local
|
||||||
|
-- Relevant PG commit:
|
||||||
|
-- f8abb0f5e114d8c309239f0faa277b97f696d829
|
||||||
|
--
|
||||||
|
\set VERBOSITY terse
|
||||||
|
SET citus.next_shard_id TO 960200;
|
||||||
|
SET citus.enable_local_execution TO ON;
|
||||||
|
-- add the foreign table to metadata with the guc
|
||||||
|
SET citus.use_citus_managed_tables TO ON;
|
||||||
|
CREATE TYPE user_enum AS ENUM ('foo', 'bar', 'buz');
|
||||||
|
CREATE TABLE foreign_table_test (c0 integer NOT NULL, c1 user_enum);
|
||||||
|
INSERT INTO foreign_table_test VALUES (1, 'foo');
|
||||||
|
CREATE EXTENSION postgres_fdw;
|
||||||
|
CREATE SERVER foreign_server
|
||||||
|
FOREIGN DATA WRAPPER postgres_fdw
|
||||||
|
OPTIONS (host 'localhost', port :'master_port', dbname 'regression');
|
||||||
|
CREATE USER MAPPING FOR CURRENT_USER
|
||||||
|
SERVER foreign_server
|
||||||
|
OPTIONS (user 'postgres');
|
||||||
|
CREATE FOREIGN TABLE foreign_table (
|
||||||
|
c0 integer NOT NULL,
|
||||||
|
c1 text
|
||||||
|
)
|
||||||
|
SERVER foreign_server
|
||||||
|
OPTIONS (schema_name 'pg15', table_name 'foreign_table_test');
|
||||||
|
-- check that the foreign table is a citus local table
|
||||||
|
SELECT partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid = 'foreign_table'::regclass ORDER BY logicalrelid;
|
||||||
|
partmethod | repmodel
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
n | s
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- same tests as in the relevant PG commit
|
||||||
|
-- Check that Remote SQL in the EXPLAIN doesn't contain casting
|
||||||
|
EXPLAIN (VERBOSE, COSTS OFF)
|
||||||
|
SELECT * FROM foreign_table WHERE c1 = 'foo' LIMIT 1;
|
||||||
|
QUERY PLAN
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
Custom Scan (Citus Adaptive)
|
||||||
|
Output: remote_scan.c0, remote_scan.c1
|
||||||
|
Task Count: 1
|
||||||
|
Tasks Shown: All
|
||||||
|
-> Task
|
||||||
|
Query: SELECT c0, c1 FROM pg15.foreign_table_960201 foreign_table WHERE (c1 OPERATOR(pg_catalog.=) 'foo'::text) LIMIT 1
|
||||||
|
Node: host=localhost port=xxxxx dbname=regression
|
||||||
|
-> Foreign Scan on pg15.foreign_table_960201 foreign_table
|
||||||
|
Output: c0, c1
|
||||||
|
Remote SQL: SELECT c0, c1 FROM pg15.foreign_table_test WHERE ((c1 = 'foo')) LIMIT 1::bigint
|
||||||
|
(10 rows)
|
||||||
|
|
||||||
|
SELECT * FROM foreign_table WHERE c1 = 'foo' LIMIT 1;
|
||||||
|
c0 | c1
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1 | foo
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- Check that Remote SQL in the EXPLAIN doesn't contain casting
|
||||||
|
EXPLAIN (VERBOSE, COSTS OFF)
|
||||||
|
SELECT * FROM foreign_table WHERE 'foo' = c1 LIMIT 1;
|
||||||
|
QUERY PLAN
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
Custom Scan (Citus Adaptive)
|
||||||
|
Output: remote_scan.c0, remote_scan.c1
|
||||||
|
Task Count: 1
|
||||||
|
Tasks Shown: All
|
||||||
|
-> Task
|
||||||
|
Query: SELECT c0, c1 FROM pg15.foreign_table_960201 foreign_table WHERE ('foo'::text OPERATOR(pg_catalog.=) c1) LIMIT 1
|
||||||
|
Node: host=localhost port=xxxxx dbname=regression
|
||||||
|
-> Foreign Scan on pg15.foreign_table_960201 foreign_table
|
||||||
|
Output: c0, c1
|
||||||
|
Remote SQL: SELECT c0, c1 FROM pg15.foreign_table_test WHERE (('foo' = c1)) LIMIT 1::bigint
|
||||||
|
(10 rows)
|
||||||
|
|
||||||
|
SELECT * FROM foreign_table WHERE 'foo' = c1 LIMIT 1;
|
||||||
|
c0 | c1
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1 | foo
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- we declared c1 to be text locally, but it's still the same type on
|
||||||
|
-- the remote which will balk if we try to do anything incompatible
|
||||||
|
-- with that remote type
|
||||||
|
SELECT * FROM foreign_table WHERE c1 LIKE 'foo' LIMIT 1; -- ERROR
|
||||||
|
ERROR: operator does not exist: pg15.user_enum ~~ unknown
|
||||||
|
SELECT * FROM foreign_table WHERE c1::text LIKE 'foo' LIMIT 1; -- ERROR; cast not pushed down
|
||||||
|
ERROR: operator does not exist: pg15.user_enum ~~ unknown
|
||||||
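An illustrative sketch, not part of the expected output: the same remote table could also be declared with the enum type on the local side (foreign_table_enum is a hypothetical name); the text declaration used above is what exercises the PG15 cast-suppression path.
CREATE FOREIGN TABLE foreign_table_enum (
    c0 integer NOT NULL,
    c1 user_enum  -- matches the remote column type instead of using text
)
SERVER foreign_server
OPTIONS (schema_name 'pg15', table_name 'foreign_table_test');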
|
-- Clean up foreign table test
|
||||||
|
RESET citus.use_citus_managed_tables;
|
||||||
|
SELECT undistribute_table('foreign_table');
|
||||||
|
NOTICE: creating a new table for pg15.foreign_table
|
||||||
|
NOTICE: dropping the old pg15.foreign_table
|
||||||
|
NOTICE: renaming the new table to pg15.foreign_table
|
||||||
|
undistribute_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT undistribute_table('foreign_table_test');
|
||||||
|
NOTICE: creating a new table for pg15.foreign_table_test
|
||||||
|
NOTICE: moving the data of pg15.foreign_table_test
|
||||||
|
NOTICE: dropping the old pg15.foreign_table_test
|
||||||
|
NOTICE: renaming the new table to pg15.foreign_table_test
|
||||||
|
undistribute_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT 1 FROM citus_remove_node('localhost', :master_port);
|
||||||
|
?column?
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
1
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
DROP SERVER foreign_server CASCADE;
|
||||||
|
NOTICE: drop cascades to 2 other objects
|
||||||
|
-- PG15 now supports specifying oid on CREATE DATABASE
|
||||||
|
-- verify that we print meaningful notice messages.
|
||||||
|
CREATE DATABASE db_with_oid OID 987654;
|
||||||
|
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
|
||||||
|
DROP DATABASE db_with_oid;
|
||||||
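An illustrative sketch, not part of the expected output: before dropping the database, the explicitly assigned OID could be verified from pg_database.
SELECT oid, datname FROM pg_database WHERE datname = 'db_with_oid';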
-- Clean up
RESET citus.shard_replication_factor;
\set VERBOSITY terse
SET client_min_messages TO ERROR;
DROP SCHEMA pg15 CASCADE;
|
|
|
@ -0,0 +1,409 @@
|
||||||
|
--
|
||||||
|
-- PG15 jsonpath tests
|
||||||
|
-- Relevant pg commit: e26114c817b610424010cfbe91a743f591246ff1
|
||||||
|
--
|
||||||
|
SHOW server_version \gset
|
||||||
|
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||||
|
\gset
|
||||||
|
\if :server_version_ge_15
|
||||||
|
\else
|
||||||
|
\q
|
||||||
|
\endif
|
||||||
|
CREATE SCHEMA jsonpath;
|
||||||
|
SET search_path TO jsonpath;
|
||||||
|
CREATE TABLE jsonpath_test (id serial, sample text);
|
||||||
|
SELECT create_distributed_table('jsonpath_test', 'id');
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
\COPY jsonpath_test(sample) FROM STDIN
|
||||||
|
-- Cast the text into jsonpath on the worker nodes.
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test ORDER BY id;
|
||||||
|
sample | sample
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
$ | $
|
||||||
|
strict $ | strict $
|
||||||
|
lax $ | $
|
||||||
|
$.a | $."a"
|
||||||
|
$.a.v | $."a"."v"
|
||||||
|
$.a.* | $."a".*
|
||||||
|
$.*[*] | $.*[*]
|
||||||
|
$.a[*] | $."a"[*]
|
||||||
|
$.a[*][*] | $."a"[*][*]
|
||||||
|
$[*] | $[*]
|
||||||
|
$[0] | $[0]
|
||||||
|
$[*][0] | $[*][0]
|
||||||
|
$[*].a | $[*]."a"
|
||||||
|
$[*][0].a.b | $[*][0]."a"."b"
|
||||||
|
$.a.**.b | $."a".**."b"
|
||||||
|
$.a.**{2}.b | $."a".**{2}."b"
|
||||||
|
$.a.**{2 to 2}.b | $."a".**{2}."b"
|
||||||
|
$.a.**{2 to 5}.b | $."a".**{2 to 5}."b"
|
||||||
|
$.a.**{0 to 5}.b | $."a".**{0 to 5}."b"
|
||||||
|
$.a.**{5 to last}.b | $."a".**{5 to last}."b"
|
||||||
|
$.a.**{last}.b | $."a".**{last}."b"
|
||||||
|
$.a.**{last to 5}.b | $."a".**{last to 5}."b"
|
||||||
|
$+1 | ($ + 1)
|
||||||
|
$-1 | ($ - 1)
|
||||||
|
$--+1 | ($ - -1)
|
||||||
|
$.a/+-1 | ($."a" / -1)
|
||||||
|
1 * 2 + 4 % -3 != false | (1 * 2 + 4 % -3 != false)
|
||||||
|
$.g ? ($.a == 1) | $."g"?($."a" == 1)
|
||||||
|
$.g ? (@ == 1) | $."g"?(@ == 1)
|
||||||
|
$.g ? (@.a == 1) | $."g"?(@."a" == 1)
|
||||||
|
$.g ? (@.a == 1 || @.a == 4) | $."g"?(@."a" == 1 || @."a" == 4)
|
||||||
|
$.g ? (@.a == 1 && @.a == 4) | $."g"?(@."a" == 1 && @."a" == 4)
|
||||||
|
$.g ? (@.a == 1 || @.a == 4 && @.b == 7) | $."g"?(@."a" == 1 || @."a" == 4 && @."b" == 7)
|
||||||
|
$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7) | $."g"?(@."a" == 1 || !(@."a" == 4) && @."b" == 7)
|
||||||
|
$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7) | $."g"?(@."a" == 1 || !(@."x" >= 123 || @."a" == 4) && @."b" == 7)
|
||||||
|
$.g ? (@.x >= @[*]?(@.a > "abc")) | $."g"?(@."x" >= @[*]?(@."a" > "abc"))
|
||||||
|
$.g ? ((@.x >= 123 || @.a == 4) is unknown) | $."g"?((@."x" >= 123 || @."a" == 4) is unknown)
|
||||||
|
$.g ? (exists (@.x)) | $."g"?(exists (@."x"))
|
||||||
|
$.g ? (exists (@.x ? (@ == 14))) | $."g"?(exists (@."x"?(@ == 14)))
|
||||||
|
$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14))) | $."g"?((@."x" >= 123 || @."a" == 4) && exists (@."x"?(@ == 14)))
|
||||||
|
$.g ? (+@.x >= +-(+@.a + 2)) | $."g"?(+@."x" >= +(-(+@."a" + 2)))
|
||||||
|
$a | $"a"
|
||||||
|
$a.b | $"a"."b"
|
||||||
|
$a[*] | $"a"[*]
|
||||||
|
$.g ? (@.zip == $zip) | $."g"?(@."zip" == $"zip")
|
||||||
|
$.a[1,2, 3 to 16] | $."a"[1,2,3 to 16]
|
||||||
|
$.a[$a + 1, ($b[*]) to -($[0] * 2)] | $."a"[$"a" + 1,$"b"[*] to -($[0] * 2)]
|
||||||
|
$.a[$.a.size() - 3] | $."a"[$."a".size() - 3]
|
||||||
|
"last" | "last"
|
||||||
|
$.last | $."last"
|
||||||
|
$[last] | $[last]
|
||||||
|
$[$[0] ? (last > 0)] | $[$[0]?(last > 0)]
|
||||||
|
null.type() | null.type()
|
||||||
|
(1).type() | (1).type()
|
||||||
|
1.2.type() | (1.2).type()
|
||||||
|
"aaa".type() | "aaa".type()
|
||||||
|
true.type() | true.type()
|
||||||
|
$.double().floor().ceiling().abs() | $.double().floor().ceiling().abs()
|
||||||
|
$.keyvalue().key | $.keyvalue()."key"
|
||||||
|
$.datetime() | $.datetime()
|
||||||
|
$.datetime("datetime template") | $.datetime("datetime template")
|
||||||
|
$ ? (@ starts with "abc") | $?(@ starts with "abc")
|
||||||
|
$ ? (@ starts with $var) | $?(@ starts with $"var")
|
||||||
|
$ ? (@ like_regex "pattern") | $?(@ like_regex "pattern")
|
||||||
|
$ ? (@ like_regex "pattern" flag "") | $?(@ like_regex "pattern")
|
||||||
|
$ ? (@ like_regex "pattern" flag "i") | $?(@ like_regex "pattern" flag "i")
|
||||||
|
$ ? (@ like_regex "pattern" flag "is") | $?(@ like_regex "pattern" flag "is")
|
||||||
|
$ ? (@ like_regex "pattern" flag "isim") | $?(@ like_regex "pattern" flag "ism")
|
||||||
|
$ ? (@ like_regex "pattern" flag "q") | $?(@ like_regex "pattern" flag "q")
|
||||||
|
$ ? (@ like_regex "pattern" flag "iq") | $?(@ like_regex "pattern" flag "iq")
|
||||||
|
$ ? (@ like_regex "pattern" flag "smixq") | $?(@ like_regex "pattern" flag "ismxq")
|
||||||
|
$ < 1 | ($ < 1)
|
||||||
|
($ < 1) || $.a.b <= $x | ($ < 1 || $."a"."b" <= $"x")
|
||||||
|
($).a.b | $."a"."b"
|
||||||
|
($.a.b).c.d | $."a"."b"."c"."d"
|
||||||
|
($.a.b + -$.x.y).c.d | ($."a"."b" + -$."x"."y")."c"."d"
|
||||||
|
(-+$.a.b).c.d | (-(+$."a"."b"))."c"."d"
|
||||||
|
1 + ($.a.b + 2).c.d | (1 + ($."a"."b" + 2)."c"."d")
|
||||||
|
1 + ($.a.b > 2).c.d | (1 + ($."a"."b" > 2)."c"."d")
|
||||||
|
($) | $
|
||||||
|
(($)) | $
|
||||||
|
((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c))))) | (($ + 1)."a" + (2)."b"?(@ > 1 || exists (@."c")))
|
||||||
|
$ ? (@.a < 1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < .1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < -.1) | $?(@."a" < -0.1)
|
||||||
|
$ ? (@.a < +.1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < 0.1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < -0.1) | $?(@."a" < -0.1)
|
||||||
|
$ ? (@.a < +0.1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < 10.1) | $?(@."a" < 10.1)
|
||||||
|
$ ? (@.a < -10.1) | $?(@."a" < -10.1)
|
||||||
|
$ ? (@.a < +10.1) | $?(@."a" < 10.1)
|
||||||
|
$ ? (@.a < 1e1) | $?(@."a" < 10)
|
||||||
|
$ ? (@.a < -1e1) | $?(@."a" < -10)
|
||||||
|
$ ? (@.a < +1e1) | $?(@."a" < 10)
|
||||||
|
$ ? (@.a < .1e1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -.1e1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +.1e1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < 0.1e1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -0.1e1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +0.1e1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < 10.1e1) | $?(@."a" < 101)
|
||||||
|
$ ? (@.a < -10.1e1) | $?(@."a" < -101)
|
||||||
|
$ ? (@.a < +10.1e1) | $?(@."a" < 101)
|
||||||
|
$ ? (@.a < 1e-1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < -1e-1) | $?(@."a" < -0.1)
|
||||||
|
$ ? (@.a < +1e-1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < .1e-1) | $?(@."a" < 0.01)
|
||||||
|
$ ? (@.a < -.1e-1) | $?(@."a" < -0.01)
|
||||||
|
$ ? (@.a < +.1e-1) | $?(@."a" < 0.01)
|
||||||
|
$ ? (@.a < 0.1e-1) | $?(@."a" < 0.01)
|
||||||
|
$ ? (@.a < -0.1e-1) | $?(@."a" < -0.01)
|
||||||
|
$ ? (@.a < +0.1e-1) | $?(@."a" < 0.01)
|
||||||
|
$ ? (@.a < 10.1e-1) | $?(@."a" < 1.01)
|
||||||
|
$ ? (@.a < -10.1e-1) | $?(@."a" < -1.01)
|
||||||
|
$ ? (@.a < +10.1e-1) | $?(@."a" < 1.01)
|
||||||
|
$ ? (@.a < 1e+1) | $?(@."a" < 10)
|
||||||
|
$ ? (@.a < -1e+1) | $?(@."a" < -10)
|
||||||
|
$ ? (@.a < +1e+1) | $?(@."a" < 10)
|
||||||
|
$ ? (@.a < .1e+1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -.1e+1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +.1e+1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < 0.1e+1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -0.1e+1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +0.1e+1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < 10.1e+1) | $?(@."a" < 101)
|
||||||
|
$ ? (@.a < -10.1e+1) | $?(@."a" < -101)
|
||||||
|
$ ? (@.a < +10.1e+1) | $?(@."a" < 101)
|
||||||
|
0 | 0
|
||||||
|
0.0 | 0.0
|
||||||
|
0.000 | 0.000
|
||||||
|
0.000e1 | 0.00
|
||||||
|
0.000e2 | 0.0
|
||||||
|
0.000e3 | 0
|
||||||
|
0.0010 | 0.0010
|
||||||
|
0.0010e-1 | 0.00010
|
||||||
|
0.0010e+1 | 0.010
|
||||||
|
0.0010e+2 | 0.10
|
||||||
|
.001 | 0.001
|
||||||
|
.001e1 | 0.01
|
||||||
|
1. | 1
|
||||||
|
1.e1 | 10
|
||||||
|
1.2.e | (1.2)."e"
|
||||||
|
(1.2).e | (1.2)."e"
|
||||||
|
1e3 | 1000
|
||||||
|
1.e3 | 1000
|
||||||
|
1.e3.e | (1000)."e"
|
||||||
|
1.e3.e4 | (1000)."e4"
|
||||||
|
1.2e3 | 1200
|
||||||
|
1.2.e3 | (1.2)."e3"
|
||||||
|
(1.2).e3 | (1.2)."e3"
|
||||||
|
1..e | (1)."e"
|
||||||
|
1..e3 | (1)."e3"
|
||||||
|
(1.).e | (1)."e"
|
||||||
|
(1.).e3 | (1)."e3"
|
||||||
|
1?(2>3) | (1)?(2 > 3)
|
||||||
|
(158 rows)
|
||||||
|
|
||||||
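An illustrative aside, not part of the expected output: the same jsonpath strings can be used directly with the jsonb path functions, for example jsonb_path_exists(), which takes a jsonpath argument.
SELECT jsonb_path_exists('{"a": 1}'::jsonb, '$.a ? (@ == 1)');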
|
-- Pull the data, and cast on the coordinator node
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples ORDER BY id;
|
||||||
|
sample | sample
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
$ | $
|
||||||
|
strict $ | strict $
|
||||||
|
lax $ | $
|
||||||
|
$.a | $."a"
|
||||||
|
$.a.v | $."a"."v"
|
||||||
|
$.a.* | $."a".*
|
||||||
|
$.*[*] | $.*[*]
|
||||||
|
$.a[*] | $."a"[*]
|
||||||
|
$.a[*][*] | $."a"[*][*]
|
||||||
|
$[*] | $[*]
|
||||||
|
$[0] | $[0]
|
||||||
|
$[*][0] | $[*][0]
|
||||||
|
$[*].a | $[*]."a"
|
||||||
|
$[*][0].a.b | $[*][0]."a"."b"
|
||||||
|
$.a.**.b | $."a".**."b"
|
||||||
|
$.a.**{2}.b | $."a".**{2}."b"
|
||||||
|
$.a.**{2 to 2}.b | $."a".**{2}."b"
|
||||||
|
$.a.**{2 to 5}.b | $."a".**{2 to 5}."b"
|
||||||
|
$.a.**{0 to 5}.b | $."a".**{0 to 5}."b"
|
||||||
|
$.a.**{5 to last}.b | $."a".**{5 to last}."b"
|
||||||
|
$.a.**{last}.b | $."a".**{last}."b"
|
||||||
|
$.a.**{last to 5}.b | $."a".**{last to 5}."b"
|
||||||
|
$+1 | ($ + 1)
|
||||||
|
$-1 | ($ - 1)
|
||||||
|
$--+1 | ($ - -1)
|
||||||
|
$.a/+-1 | ($."a" / -1)
|
||||||
|
1 * 2 + 4 % -3 != false | (1 * 2 + 4 % -3 != false)
|
||||||
|
$.g ? ($.a == 1) | $."g"?($."a" == 1)
|
||||||
|
$.g ? (@ == 1) | $."g"?(@ == 1)
|
||||||
|
$.g ? (@.a == 1) | $."g"?(@."a" == 1)
|
||||||
|
$.g ? (@.a == 1 || @.a == 4) | $."g"?(@."a" == 1 || @."a" == 4)
|
||||||
|
$.g ? (@.a == 1 && @.a == 4) | $."g"?(@."a" == 1 && @."a" == 4)
|
||||||
|
$.g ? (@.a == 1 || @.a == 4 && @.b == 7) | $."g"?(@."a" == 1 || @."a" == 4 && @."b" == 7)
|
||||||
|
$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7) | $."g"?(@."a" == 1 || !(@."a" == 4) && @."b" == 7)
|
||||||
|
$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7) | $."g"?(@."a" == 1 || !(@."x" >= 123 || @."a" == 4) && @."b" == 7)
|
||||||
|
$.g ? (@.x >= @[*]?(@.a > "abc")) | $."g"?(@."x" >= @[*]?(@."a" > "abc"))
|
||||||
|
$.g ? ((@.x >= 123 || @.a == 4) is unknown) | $."g"?((@."x" >= 123 || @."a" == 4) is unknown)
|
||||||
|
$.g ? (exists (@.x)) | $."g"?(exists (@."x"))
|
||||||
|
$.g ? (exists (@.x ? (@ == 14))) | $."g"?(exists (@."x"?(@ == 14)))
|
||||||
|
$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14))) | $."g"?((@."x" >= 123 || @."a" == 4) && exists (@."x"?(@ == 14)))
|
||||||
|
$.g ? (+@.x >= +-(+@.a + 2)) | $."g"?(+@."x" >= +(-(+@."a" + 2)))
|
||||||
|
$a | $"a"
|
||||||
|
$a.b | $"a"."b"
|
||||||
|
$a[*] | $"a"[*]
|
||||||
|
$.g ? (@.zip == $zip) | $."g"?(@."zip" == $"zip")
|
||||||
|
$.a[1,2, 3 to 16] | $."a"[1,2,3 to 16]
|
||||||
|
$.a[$a + 1, ($b[*]) to -($[0] * 2)] | $."a"[$"a" + 1,$"b"[*] to -($[0] * 2)]
|
||||||
|
$.a[$.a.size() - 3] | $."a"[$."a".size() - 3]
|
||||||
|
"last" | "last"
|
||||||
|
$.last | $."last"
|
||||||
|
$[last] | $[last]
|
||||||
|
$[$[0] ? (last > 0)] | $[$[0]?(last > 0)]
|
||||||
|
null.type() | null.type()
|
||||||
|
(1).type() | (1).type()
|
||||||
|
1.2.type() | (1.2).type()
|
||||||
|
"aaa".type() | "aaa".type()
|
||||||
|
true.type() | true.type()
|
||||||
|
$.double().floor().ceiling().abs() | $.double().floor().ceiling().abs()
|
||||||
|
$.keyvalue().key | $.keyvalue()."key"
|
||||||
|
$.datetime() | $.datetime()
|
||||||
|
$.datetime("datetime template") | $.datetime("datetime template")
|
||||||
|
$ ? (@ starts with "abc") | $?(@ starts with "abc")
|
||||||
|
$ ? (@ starts with $var) | $?(@ starts with $"var")
|
||||||
|
$ ? (@ like_regex "pattern") | $?(@ like_regex "pattern")
|
||||||
|
$ ? (@ like_regex "pattern" flag "") | $?(@ like_regex "pattern")
|
||||||
|
$ ? (@ like_regex "pattern" flag "i") | $?(@ like_regex "pattern" flag "i")
|
||||||
|
$ ? (@ like_regex "pattern" flag "is") | $?(@ like_regex "pattern" flag "is")
|
||||||
|
$ ? (@ like_regex "pattern" flag "isim") | $?(@ like_regex "pattern" flag "ism")
|
||||||
|
$ ? (@ like_regex "pattern" flag "q") | $?(@ like_regex "pattern" flag "q")
|
||||||
|
$ ? (@ like_regex "pattern" flag "iq") | $?(@ like_regex "pattern" flag "iq")
|
||||||
|
$ ? (@ like_regex "pattern" flag "smixq") | $?(@ like_regex "pattern" flag "ismxq")
|
||||||
|
$ < 1 | ($ < 1)
|
||||||
|
($ < 1) || $.a.b <= $x | ($ < 1 || $."a"."b" <= $"x")
|
||||||
|
($).a.b | $."a"."b"
|
||||||
|
($.a.b).c.d | $."a"."b"."c"."d"
|
||||||
|
($.a.b + -$.x.y).c.d | ($."a"."b" + -$."x"."y")."c"."d"
|
||||||
|
(-+$.a.b).c.d | (-(+$."a"."b"))."c"."d"
|
||||||
|
1 + ($.a.b + 2).c.d | (1 + ($."a"."b" + 2)."c"."d")
|
||||||
|
1 + ($.a.b > 2).c.d | (1 + ($."a"."b" > 2)."c"."d")
|
||||||
|
($) | $
|
||||||
|
(($)) | $
|
||||||
|
((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c))))) | (($ + 1)."a" + (2)."b"?(@ > 1 || exists (@."c")))
|
||||||
|
$ ? (@.a < 1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < .1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < -.1) | $?(@."a" < -0.1)
|
||||||
|
$ ? (@.a < +.1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < 0.1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < -0.1) | $?(@."a" < -0.1)
|
||||||
|
$ ? (@.a < +0.1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < 10.1) | $?(@."a" < 10.1)
|
||||||
|
$ ? (@.a < -10.1) | $?(@."a" < -10.1)
|
||||||
|
$ ? (@.a < +10.1) | $?(@."a" < 10.1)
|
||||||
|
$ ? (@.a < 1e1) | $?(@."a" < 10)
|
||||||
|
$ ? (@.a < -1e1) | $?(@."a" < -10)
|
||||||
|
$ ? (@.a < +1e1) | $?(@."a" < 10)
|
||||||
|
$ ? (@.a < .1e1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -.1e1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +.1e1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < 0.1e1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -0.1e1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +0.1e1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < 10.1e1) | $?(@."a" < 101)
|
||||||
|
$ ? (@.a < -10.1e1) | $?(@."a" < -101)
|
||||||
|
$ ? (@.a < +10.1e1) | $?(@."a" < 101)
|
||||||
|
$ ? (@.a < 1e-1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < -1e-1) | $?(@."a" < -0.1)
|
||||||
|
$ ? (@.a < +1e-1) | $?(@."a" < 0.1)
|
||||||
|
$ ? (@.a < .1e-1) | $?(@."a" < 0.01)
|
||||||
|
$ ? (@.a < -.1e-1) | $?(@."a" < -0.01)
|
||||||
|
$ ? (@.a < +.1e-1) | $?(@."a" < 0.01)
|
||||||
|
$ ? (@.a < 0.1e-1) | $?(@."a" < 0.01)
|
||||||
|
$ ? (@.a < -0.1e-1) | $?(@."a" < -0.01)
|
||||||
|
$ ? (@.a < +0.1e-1) | $?(@."a" < 0.01)
|
||||||
|
$ ? (@.a < 10.1e-1) | $?(@."a" < 1.01)
|
||||||
|
$ ? (@.a < -10.1e-1) | $?(@."a" < -1.01)
|
||||||
|
$ ? (@.a < +10.1e-1) | $?(@."a" < 1.01)
|
||||||
|
$ ? (@.a < 1e+1) | $?(@."a" < 10)
|
||||||
|
$ ? (@.a < -1e+1) | $?(@."a" < -10)
|
||||||
|
$ ? (@.a < +1e+1) | $?(@."a" < 10)
|
||||||
|
$ ? (@.a < .1e+1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -.1e+1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +.1e+1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < 0.1e+1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < -0.1e+1) | $?(@."a" < -1)
|
||||||
|
$ ? (@.a < +0.1e+1) | $?(@."a" < 1)
|
||||||
|
$ ? (@.a < 10.1e+1) | $?(@."a" < 101)
|
||||||
|
$ ? (@.a < -10.1e+1) | $?(@."a" < -101)
|
||||||
|
$ ? (@.a < +10.1e+1) | $?(@."a" < 101)
|
||||||
|
0 | 0
|
||||||
|
0.0 | 0.0
|
||||||
|
0.000 | 0.000
|
||||||
|
0.000e1 | 0.00
|
||||||
|
0.000e2 | 0.0
|
||||||
|
0.000e3 | 0
|
||||||
|
0.0010 | 0.0010
|
||||||
|
0.0010e-1 | 0.00010
|
||||||
|
0.0010e+1 | 0.010
|
||||||
|
0.0010e+2 | 0.10
|
||||||
|
.001 | 0.001
|
||||||
|
.001e1 | 0.01
|
||||||
|
1. | 1
|
||||||
|
1.e1 | 10
|
||||||
|
1.2.e | (1.2)."e"
|
||||||
|
(1.2).e | (1.2)."e"
|
||||||
|
1e3 | 1000
|
||||||
|
1.e3 | 1000
|
||||||
|
1.e3.e | (1000)."e"
|
||||||
|
1.e3.e4 | (1000)."e4"
|
||||||
|
1.2e3 | 1200
|
||||||
|
1.2.e3 | (1.2)."e3"
|
||||||
|
(1.2).e3 | (1.2)."e3"
|
||||||
|
1..e | (1)."e"
|
||||||
|
1..e3 | (1)."e3"
|
||||||
|
(1.).e | (1)."e"
|
||||||
|
(1.).e3 | (1)."e3"
|
||||||
|
1?(2>3) | (1)?(2 > 3)
|
||||||
|
(158 rows)
|
||||||
|
|
||||||
|
-- now test some cases where trailing junk causes errors
|
||||||
|
\COPY jsonpath_test(sample) FROM STDIN
|
||||||
|
-- the following tests try to evaluate type casting on worker, followed by coordinator
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '';
|
||||||
|
ERROR: invalid input syntax for type jsonpath: ""
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '' OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples;
|
||||||
|
ERROR: invalid input syntax for type jsonpath: ""
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = 'last';
|
||||||
|
ERROR: LAST is allowed only in array subscripts
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = 'last' OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples;
|
||||||
|
ERROR: LAST is allowed only in array subscripts
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '1.type()';
|
||||||
|
ERROR: trailing junk after numeric literal at or near "1.t" of jsonpath input
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '1.type()' OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples;
|
||||||
|
ERROR: trailing junk after numeric literal at or near "1.t" of jsonpath input
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '$ ? (@ like_regex "(invalid pattern")';
|
||||||
|
ERROR: invalid regular expression: parentheses () not balanced
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '$ ? (@ like_regex "(invalid pattern")' OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples;
|
||||||
|
ERROR: invalid regular expression: parentheses () not balanced
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '$ ? (@ like_regex "pattern" flag "xsms")';
|
||||||
|
ERROR: XQuery "x" flag (expanded regular expressions) is not implemented
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '$ ? (@ like_regex "pattern" flag "xsms")' OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples;
|
||||||
|
ERROR: XQuery "x" flag (expanded regular expressions) is not implemented
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '@ + 1';
|
||||||
|
ERROR: @ is not allowed in root expressions
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '@ + 1' OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples;
|
||||||
|
ERROR: @ is not allowed in root expressions
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '00';
|
||||||
|
ERROR: trailing junk after numeric literal at or near "00" of jsonpath input
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '00' OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples;
|
||||||
|
ERROR: trailing junk after numeric literal at or near "00" of jsonpath input
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '1.e';
|
||||||
|
ERROR: trailing junk after numeric literal at or near "1.e" of jsonpath input
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '1.e' OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples;
|
||||||
|
ERROR: trailing junk after numeric literal at or near "1.e" of jsonpath input
|
||||||
|
SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '1.2e3a';
|
||||||
|
ERROR: trailing junk after numeric literal at or near "1.2e3a" of jsonpath input
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '1.2e3a' OFFSET 0)
|
||||||
|
SELECT sample, sample::jsonpath FROM samples;
|
||||||
|
ERROR: trailing junk after numeric literal at or near "1.2e3a" of jsonpath input
|
||||||
|
DROP SCHEMA jsonpath CASCADE;
|
||||||
|
NOTICE: drop cascades to table jsonpath_test
|
|
@ -0,0 +1,10 @@
--
-- PG15 jsonpath tests
-- Relevant pg commit: e26114c817b610424010cfbe91a743f591246ff1
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
@ -250,6 +250,52 @@ BEGIN;
|
||||||
CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT);
|
CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT);
|
||||||
ERROR: cannot switch local execution status from local execution disabled to local execution enabled since it can cause visibility problems in the current transaction
|
ERROR: cannot switch local execution status from local execution disabled to local execution enabled since it can cause visibility problems in the current transaction
|
||||||
ROLLBACK;
|
ROLLBACK;
|
||||||
|
CREATE TABLE set_on_default_test_referenced(
|
||||||
|
col_1 int, col_2 int, col_3 int, col_4 int,
|
||||||
|
unique (col_1, col_3)
|
||||||
|
);
|
||||||
|
SELECT create_reference_table('set_on_default_test_referenced');
|
||||||
|
create_reference_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- from citus local to reference - 1
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 int, col_2 int, col_3 serial, col_4 int,
|
||||||
|
FOREIGN KEY(col_1, col_3)
|
||||||
|
REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON UPDATE SET DEFAULT
|
||||||
|
);
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 serial, col_2 int, col_3 int, col_4 int
|
||||||
|
);
|
||||||
|
-- from citus local to reference - 2
|
||||||
|
ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
|
||||||
|
FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT;
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
|
DROP TABLE set_on_default_test_referencing, set_on_default_test_referenced;
|
||||||
|
NOTICE: executing the command locally: DROP TABLE IF EXISTS ref_citus_local_fkeys.set_on_default_test_referenced_xxxxx CASCADE
|
||||||
|
CREATE TABLE set_on_default_test_referenced(
|
||||||
|
col_1 int, col_2 int, col_3 int, col_4 int,
|
||||||
|
unique (col_1, col_3)
|
||||||
|
);
|
||||||
|
SELECT citus_add_local_table_to_metadata('set_on_default_test_referenced');
|
||||||
|
citus_add_local_table_to_metadata
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- from citus local to citus local
|
||||||
|
CREATE TABLE set_on_default_test_referencing(
|
||||||
|
col_1 int, col_2 int, col_3 serial, col_4 int,
|
||||||
|
FOREIGN KEY(col_1, col_3)
|
||||||
|
REFERENCES set_on_default_test_referenced(col_1, col_3)
|
||||||
|
ON DELETE SET DEFAULT
|
||||||
|
);
|
||||||
|
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
|
||||||
-- cleanup at exit
|
-- cleanup at exit
|
||||||
DROP SCHEMA ref_citus_local_fkeys CASCADE;
|
DROP SCHEMA ref_citus_local_fkeys CASCADE;
|
||||||
NOTICE: drop cascades to 6 other objects
|
NOTICE: drop cascades to 8 other objects
|
||||||
|
|
|
@ -221,7 +221,7 @@ SELECT * FROM sale_triggers ORDER BY 1, 2;
-- after upgrade to PG15, test that we can't rename a distributed clone trigger
ALTER TRIGGER "renamed_yet_another_trigger" ON "sale_alabama" RENAME TO "another_trigger_name";
ERROR: cannot rename trigger "renamed_yet_another_trigger" on table "sale_alabama"
HINT: Rename trigger on partitioned table "sale" instead.
HINT: Rename the trigger on the partitioned table "sale" instead.
SELECT count(*) FROM pg_trigger WHERE tgname like 'another_trigger_name%';
 count
---------------------------------------------------------------------
@ -58,7 +58,7 @@ test: cte_inline recursive_view_local_table values sequences_with_different_type
test: pg13 pg12
# run pg14 sequentially as it syncs metadata
test: pg14
test: pg15
test: pg15 pg15_jsonpath
test: drop_column_partitioned_table
test: tableam
@ -0,0 +1,176 @@
|
||||||
|
#include "isolation_mx_common.include.spec"
|
||||||
|
|
||||||
|
// Test scenario for nonblocking split and concurrent INSERT/UPDATE/DELETE
|
||||||
|
// session s1 - Executes create_distributed_table_concurrently after dropping a column on tables with replica identities
|
||||||
|
// session s2 - Does concurrent inserts/update/delete
|
||||||
|
// session s3 - Holds advisory locks
|
||||||
|
|
||||||
|
setup
|
||||||
|
{
|
||||||
|
SET citus.shard_replication_factor TO 1;
|
||||||
|
CREATE TABLE observations_with_pk (
|
||||||
|
tenant_id text not null,
|
||||||
|
dummy int,
|
||||||
|
measurement_id bigserial not null,
|
||||||
|
payload jsonb not null,
|
||||||
|
observation_time timestamptz not null default '03/11/2018 02:00:00'::TIMESTAMP,
|
||||||
|
PRIMARY KEY (tenant_id, measurement_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE observations_with_full_replica_identity (
|
||||||
|
tenant_id text not null,
|
||||||
|
dummy int,
|
||||||
|
measurement_id bigserial not null,
|
||||||
|
payload jsonb not null,
|
||||||
|
observation_time timestamptz not null default '03/11/2018 02:00:00'::TIMESTAMP
|
||||||
|
);
|
||||||
|
ALTER TABLE observations_with_full_replica_identity REPLICA IDENTITY FULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
teardown
|
||||||
|
{
|
||||||
|
DROP TABLE observations_with_pk;
|
||||||
|
DROP TABLE observations_with_full_replica_identity;
|
||||||
|
}
|
||||||
|
|
||||||
|
session "s1"
|
||||||
|
|
||||||
|
step "s1-alter-table"
|
||||||
|
{
|
||||||
|
ALTER TABLE observations_with_pk DROP COLUMN dummy;
|
||||||
|
ALTER TABLE observations_with_full_replica_identity DROP COLUMN dummy;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-set-factor-1"
|
||||||
|
{
|
||||||
|
SET citus.shard_replication_factor TO 1;
|
||||||
|
SELECT citus_set_coordinator_host('localhost');
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-create-distributed-table-observations_with_pk-concurrently"
|
||||||
|
{
|
||||||
|
SELECT create_distributed_table_concurrently('observations_with_pk','tenant_id');
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-create-distributed-table-observations-2-concurrently"
|
||||||
|
{
|
||||||
|
SELECT create_distributed_table_concurrently('observations_with_full_replica_identity','tenant_id');
|
||||||
|
}
|
||||||
|
|
||||||
|
session "s2"
|
||||||
|
|
||||||
|
step "s2-begin"
|
||||||
|
{
|
||||||
|
BEGIN;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-insert-observations_with_pk"
|
||||||
|
{
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_pk(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-insert-observations_with_full_replica_identity"
|
||||||
|
{
|
||||||
|
INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
INSERT INTO observations_with_full_replica_identity(tenant_id, payload) SELECT 'tenant_id', jsonb_build_object('name', 29.3);
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-update-observations_with_pk"
|
||||||
|
{
|
||||||
|
UPDATE observations_with_pk set observation_time='03/11/2019 02:00:00'::TIMESTAMP where tenant_id = 'tenant_id' and measurement_id = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-update-primary-key-observations_with_pk"
|
||||||
|
{
|
||||||
|
UPDATE observations_with_pk set measurement_id=100 where tenant_id = 'tenant_id' and measurement_id = 4 ;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-update-observations_with_full_replica_identity"
|
||||||
|
{
|
||||||
|
UPDATE observations_with_full_replica_identity set observation_time='03/11/2019 02:00:00'::TIMESTAMP where tenant_id = 'tenant_id' and measurement_id = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-delete-observations_with_pk"
|
||||||
|
{
|
||||||
|
DELETE FROM observations_with_pk where tenant_id = 'tenant_id' and measurement_id = 3 ;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-delete-observations_with_full_replica_identity"
|
||||||
|
{
|
||||||
|
DELETE FROM observations_with_full_replica_identity where tenant_id = 'tenant_id' and measurement_id = 3 ;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-end"
|
||||||
|
{
|
||||||
|
COMMIT;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-print-cluster-1"
|
||||||
|
{
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_pk', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
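        -- Editorial note (not part of the diff): run_command_on_placements() substitutes each
        -- placement's shard relation name for %s, so the query above returns a per-shard row
        -- count for every placement of the table.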
|
||||||
|
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_pk
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-print-cluster-2"
|
||||||
|
{
|
||||||
|
-- row count per shard
|
||||||
|
SELECT
|
||||||
|
nodeport, shardid, success, result
|
||||||
|
FROM
|
||||||
|
run_command_on_placements('observations_with_full_replica_identity', 'select count(*) from %s')
|
||||||
|
ORDER BY
|
||||||
|
nodeport, shardid;
|
||||||
|
|
||||||
|
SELECT *
|
||||||
|
FROM
|
||||||
|
observations_with_full_replica_identity
|
||||||
|
ORDER BY
|
||||||
|
measurement_id;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
session "s3"
|
||||||
|
|
||||||
|
// this advisory lock with (almost) random values is only used
|
||||||
|
// for testing purposes. For details, check Citus' logical replication
|
||||||
|
// source code
|
||||||
|
step "s3-acquire-advisory-lock"
|
||||||
|
{
|
||||||
|
SELECT pg_advisory_lock(44000, 55152);
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s3-release-advisory-lock"
|
||||||
|
{
|
||||||
|
SELECT pg_advisory_unlock(44000, 55152);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Concurrent Insert/Update with create_distributed_table_concurrently(with primary key as replica identity) after dropping a column:
|
||||||
|
// s3 holds advisory lock -> s1 starts create_distributed_table_concurrently and waits for advisory lock ->
|
||||||
|
// s2 concurrently inserts/deletes/updates rows -> s3 releases the advisory lock
|
||||||
|
// -> s1 completes create_distributed_table_concurrently -> result is reflected in new shards
|
||||||
|
permutation "s2-print-cluster-1" "s3-acquire-advisory-lock" "s2-begin" "s1-alter-table" "s1-set-factor-1" "s1-create-distributed-table-observations_with_pk-concurrently" "s2-insert-observations_with_pk" "s2-update-observations_with_pk" "s2-end" "s2-print-cluster-1" "s3-release-advisory-lock" "s2-print-cluster-1"
|
||||||
|
permutation "s2-print-cluster-1" "s3-acquire-advisory-lock" "s2-begin" "s1-alter-table" "s1-set-factor-1" "s1-create-distributed-table-observations_with_pk-concurrently" "s2-insert-observations_with_pk" "s2-update-primary-key-observations_with_pk" "s2-end" "s2-print-cluster-1" "s3-release-advisory-lock" "s2-print-cluster-1"
|
||||||
|
permutation "s2-print-cluster-1" "s3-acquire-advisory-lock" "s2-begin" "s1-alter-table" "s1-set-factor-1" "s1-create-distributed-table-observations_with_pk-concurrently" "s2-insert-observations_with_pk" "s2-update-observations_with_pk" "s2-delete-observations_with_pk" "s2-end" "s2-print-cluster-1" "s3-release-advisory-lock" "s2-print-cluster-1"
|
||||||
|
|
||||||
|
|
||||||
|
// Concurrent Insert/Update with create_distributed_table_concurrently(with replica identity full) after dropping a column:
|
||||||
|
// s3 holds advisory lock -> s1 starts create_distributed_table_concurrently and waits for advisory lock ->
|
||||||
|
// s2 concurrently inserts/deletes/updates rows -> s3 releases the advisory lock
|
||||||
|
// -> s1 completes create_distributed_table_concurrently -> result is reflected in new shards
|
||||||
|
permutation "s2-print-cluster-2" "s3-acquire-advisory-lock" "s2-begin" "s1-alter-table" "s1-set-factor-1" "s1-create-distributed-table-observations-2-concurrently" "s2-insert-observations_with_full_replica_identity" "s2-update-observations_with_full_replica_identity" "s2-end" "s2-print-cluster-2" "s3-release-advisory-lock" "s2-print-cluster-2"
|
||||||
|
permutation "s2-print-cluster-2" "s3-acquire-advisory-lock" "s2-begin" "s1-alter-table" "s1-set-factor-1" "s1-create-distributed-table-observations-2-concurrently" "s2-insert-observations_with_full_replica_identity" "s2-update-observations_with_full_replica_identity" "s2-delete-observations_with_full_replica_identity" "s2-end" "s2-print-cluster-2" "s3-release-advisory-lock" "s2-print-cluster-2"
|
|
@ -0,0 +1,136 @@
|
||||||
|
// isolation_logical_replication_nonsu_nonbypassrls
|
||||||
|
// test moving a single shard that has rls
|
||||||
|
// owned by a user that is neither superuser nor bypassrls
|
||||||
|
// PG15 added extra permission checks within logical replication
|
||||||
|
// this test makes sure that target table owners are still
// able to replicate despite RLS policies.
|
||||||
|
// Relevant PG commit: a2ab9c06ea15fbcb2bfde570986a06b37f52bcca
|
||||||
|
|
||||||
|
setup
|
||||||
|
{
|
||||||
|
-- setup involves a lot of DDL inside a single tx block, so use sequential mode
|
||||||
|
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
|
||||||
|
|
||||||
|
SET citus.max_cached_conns_per_worker to 0;
|
||||||
|
SET citus.next_shard_id TO 1234000;
|
||||||
|
SET citus.shard_count TO 4;
|
||||||
|
SET citus.shard_replication_factor TO 1;
|
||||||
|
|
||||||
|
CREATE TABLE dist(column1 int PRIMARY KEY, column2 int);
|
||||||
|
SELECT create_distributed_table('dist', 'column1');
|
||||||
|
|
||||||
|
CREATE USER new_user;
|
||||||
|
GRANT ALL ON SCHEMA public TO new_user;
|
||||||
|
|
||||||
|
SELECT get_shard_id_for_distribution_column('dist', 23) INTO selected_shard;
|
||||||
|
GRANT ALL ON TABLE selected_shard TO new_user;
|
||||||
|
}
|
||||||
|
|
||||||
|
teardown
|
||||||
|
{
|
||||||
|
DROP TABLE selected_shard;
|
||||||
|
DROP TABLE dist;
|
||||||
|
REVOKE ALL ON SCHEMA public FROM new_user;
|
||||||
|
DROP USER new_user;
|
||||||
|
}
|
||||||
|
|
||||||
|
session "s1"
|
||||||
|
|
||||||
|
step "s1-no-connection-cache"
|
||||||
|
{
|
||||||
|
SET citus.max_cached_conns_per_worker to 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-table-owner-new_user"
|
||||||
|
{
|
||||||
|
ALTER TABLE dist OWNER TO new_user;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-table-enable-rls"
|
||||||
|
{
|
||||||
|
ALTER TABLE dist ENABLE ROW LEVEL SECURITY;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-table-force-rls"
|
||||||
|
{
|
||||||
|
ALTER TABLE dist FORCE ROW LEVEL SECURITY;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-user-spec"
|
||||||
|
{
|
||||||
|
SELECT rolname, rolsuper, rolbypassrls FROM pg_authid WHERE rolname = 'new_user';
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-begin"
|
||||||
|
{
|
||||||
|
BEGIN;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-set-role"
|
||||||
|
{
|
||||||
|
SET ROLE new_user;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-move-placement"
|
||||||
|
{
|
||||||
|
SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637);
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-reset-role"
|
||||||
|
{
|
||||||
|
RESET ROLE;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-end"
|
||||||
|
{
|
||||||
|
COMMIT;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-select"
|
||||||
|
{
|
||||||
|
SELECT * FROM dist ORDER BY column1;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s1-get-shard-distribution"
|
||||||
|
{
|
||||||
|
SELECT shardid, nodeport FROM pg_dist_placement INNER JOIN pg_dist_node ON (pg_dist_placement.groupid = pg_dist_node.groupid) WHERE shardstate != 4 AND shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport;
|
||||||
|
}
|
||||||
|
|
||||||
|
session "s2"
|
||||||
|
|
||||||
|
step "s2-no-connection-cache"
|
||||||
|
{
|
||||||
|
SET citus.max_cached_conns_per_worker to 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s2-insert"
|
||||||
|
{
|
||||||
|
INSERT INTO dist VALUES (23, 23);
|
||||||
|
}
|
||||||
|
|
||||||
|
session "s3"
|
||||||
|
|
||||||
|
step "s3-no-connection-cache"
|
||||||
|
{
|
||||||
|
SET citus.max_cached_conns_per_worker to 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s3-acquire-advisory-lock"
|
||||||
|
{
|
||||||
|
SELECT pg_advisory_lock(44000, 55152);
|
||||||
|
}
|
||||||
|
|
||||||
|
step "s3-release-advisory-lock"
|
||||||
|
{
|
||||||
|
SELECT pg_advisory_unlock(44000, 55152);
|
||||||
|
}
|
||||||
|
|
||||||
|
// first permutation enables row level security
|
||||||
|
// second permutation forces row level security
|
||||||
|
// either way we should be able to complete the shard move
|
||||||
|
// Check out https://github.com/citusdata/citus/pull/6369#discussion_r979823178 for details
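// Editorial note (not part of the diff): ENABLE ROW LEVEL SECURITY leaves the table owner
// exempt from policies, while FORCE ROW LEVEL SECURITY applies them to the owner as well;
// the two permutations below exercise the shard move under both modes.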
|
||||||
|
|
||||||
|
permutation "s1-table-owner-new_user" "s1-table-enable-rls" "s1-get-shard-distribution" "s1-user-spec" "s3-acquire-advisory-lock" "s1-begin" "s1-set-role" "s1-move-placement" "s2-insert" "s3-release-advisory-lock" "s1-reset-role" "s1-end" "s1-select" "s1-get-shard-distribution"
|
||||||
|
// running no connection cache commands on 2nd permutation because of #3785
|
||||||
|
// otherwise citus_move_shard_placement fails with permission error of new_user
|
||||||
|
permutation "s1-no-connection-cache" "s2-no-connection-cache" "s3-no-connection-cache" "s1-table-owner-new_user" "s1-table-force-rls" "s1-get-shard-distribution" "s1-user-spec" "s3-acquire-advisory-lock" "s1-begin" "s1-set-role" "s1-move-placement" "s2-insert" "s3-release-advisory-lock" "s1-reset-role" "s1-end" "s1-select" "s1-get-shard-distribution"
|
|
@ -33,6 +33,95 @@ SELECT table_name, citus_table_type, distribution_column, shard_count FROM publi
|
||||||
SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables
|
SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables
|
||||||
WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2') GROUP BY colocation_id ORDER BY 1;
|
WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2') GROUP BY colocation_id ORDER BY 1;
|
||||||
|
|
||||||
|
-- right now dist_table has columns a, b, dist_column is b, it has 6 shards
|
||||||
|
-- column cache is: a pos 1, b pos 2
|
||||||
|
|
||||||
|
-- let's add another column
|
||||||
|
ALTER TABLE dist_table ADD COLUMN c int DEFAULT 1;
|
||||||
|
|
||||||
|
-- right now column cache is: a pos 1, b pos 2, c pos 3
|
||||||
|
|
||||||
|
-- test using alter_distributed_table to change shard count after dropping one column
|
||||||
|
ALTER TABLE dist_table DROP COLUMN a;
|
||||||
|
|
||||||
|
-- right now column cache is: a pos 1 attisdropped=true, b pos 2, c pos 3
|
||||||
|
|
||||||
|
-- let's try changing the shard count
|
||||||
|
SELECT alter_distributed_table('dist_table', shard_count := 7, cascade_to_colocated := false);
|
||||||
|
|
||||||
|
-- right now column cache is: b pos 1, c pos 2 because a new table has been created
|
||||||
|
-- check that b is still distribution column
|
||||||
|
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||||
|
WHERE table_name = 'dist_table'::regclass;
|
||||||
|
|
||||||
|
-- let's add another column
|
||||||
|
ALTER TABLE dist_table ADD COLUMN d int DEFAULT 2;
|
||||||
|
|
||||||
|
-- right now column cache is: b pos 1, c pos 2, d pos 3, dist_column is b
|
||||||
|
|
||||||
|
-- test using alter_distributed_table to change dist. column after dropping one column
|
||||||
|
ALTER TABLE dist_table DROP COLUMN c;
|
||||||
|
|
||||||
|
-- right now column cache is: b pos 1, c pos 2 attisdropped=true, d pos 3
|
||||||
|
|
||||||
|
-- let's try changing the distribution column
|
||||||
|
SELECT alter_distributed_table('dist_table', distribution_column := 'd');
|
||||||
|
|
||||||
|
-- right now column cache is: b pos 1, d pos 2 because a new table has been created
|
||||||
|
-- check that d is the distribution column
|
||||||
|
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||||
|
WHERE table_name = 'dist_table'::regclass;
|
||||||
|
|
||||||
|
-- add another column and undistribute
|
||||||
|
ALTER TABLE dist_table ADD COLUMN e int DEFAULT 3;
|
||||||
|
SELECT undistribute_table('dist_table');
|
||||||
|
|
||||||
|
-- right now column cache is: b pos 1, d pos 2, e pos 3, table is not Citus table
|
||||||
|
-- try dropping column and then distributing
|
||||||
|
|
||||||
|
ALTER TABLE dist_table DROP COLUMN b;
|
||||||
|
|
||||||
|
-- right now column cache is: b pos 1 attisdropped=true, d pos 2, e pos 3
|
||||||
|
|
||||||
|
-- distribute with d
|
||||||
|
SELECT create_distributed_table ('dist_table', 'd', colocate_with := 'none');
|
||||||
|
|
||||||
|
-- check that d is the distribution column
|
||||||
|
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||||
|
WHERE table_name = 'dist_table'::regclass;
|
||||||
|
|
||||||
|
-- alter distribution column to e
|
||||||
|
SELECT alter_distributed_table('dist_table', distribution_column := 'e');
|
||||||
|
|
||||||
|
-- right now column cache is: d pos 1, e pos 2
|
||||||
|
-- check that e is the distribution column
|
||||||
|
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||||
|
WHERE table_name = 'dist_table'::regclass;
|
||||||
|
|
||||||
|
ALTER TABLE dist_table ADD COLUMN a int DEFAULT 4;
|
||||||
|
ALTER TABLE dist_table ADD COLUMN b int DEFAULT 5;
|
||||||
|
|
||||||
|
-- right now column cache is: d pos 1, e pos 2, a pos 3, b pos 4
|
||||||
|
|
||||||
|
-- alter distribution column to a
|
||||||
|
SELECT alter_distributed_table('dist_table', distribution_column := 'a');
|
||||||
|
|
||||||
|
-- right now column cache hasn't changed
|
||||||
|
-- check that a is the distribution column
|
||||||
|
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||||
|
WHERE table_name = 'dist_table'::regclass;
|
||||||
|
|
||||||
|
ALTER TABLE dist_table DROP COLUMN d;
|
||||||
|
ALTER TABLE dist_table DROP COLUMN e;
|
||||||
|
-- right now column cache is: d pos 1 attisdropped=true, e pos 2 attisdropped=true, a pos 3, b pos 4
|
||||||
|
|
||||||
|
-- alter distribution column to b
|
||||||
|
SELECT alter_distributed_table('dist_table', distribution_column := 'b');
|
||||||
|
-- column cache is: a pos 1, b pos 2 -> configuration with which we started these drop column tests
|
||||||
|
-- check that b is the distribution column
|
||||||
|
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||||
|
WHERE table_name = 'dist_table'::regclass;
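-- Editorial sketch (not part of the diff): the "column cache" positions referenced in the
-- comments above correspond to pg_attribute entries; a side-effect-free query to inspect
-- them, assuming the table is still named dist_table, would be:
SELECT attnum, attname, attisdropped
FROM pg_attribute
WHERE attrelid = 'dist_table'::regclass AND attnum > 0
ORDER BY attnum;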
|
||||||
|
|
||||||
-- test altering colocation, note that shard count will also change
|
-- test altering colocation, note that shard count will also change
|
||||||
SELECT alter_distributed_table('dist_table', colocate_with := 'alter_distributed_table.colocation_table');
|
SELECT alter_distributed_table('dist_table', colocate_with := 'alter_distributed_table.colocation_table');
|
||||||
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||||
|
|
|
@ -102,5 +102,22 @@ ALTER USER current_user SET search_path TO test_sp;
|
||||||
SELECT COUNT(*) FROM public.test_search_path;
|
SELECT COUNT(*) FROM public.test_search_path;
|
||||||
ALTER USER current_user RESET search_path;
|
ALTER USER current_user RESET search_path;
|
||||||
|
|
||||||
|
-- test empty/null password: it is treated the same as no password
|
||||||
|
SET password_encryption TO md5;
|
||||||
|
|
||||||
|
CREATE ROLE new_role;
|
||||||
|
SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_password FROM run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'new_role'$$) workers, pg_authid WHERE pg_authid.rolname = 'new_role';
|
||||||
|
|
||||||
|
ALTER ROLE new_role PASSWORD '';
|
||||||
|
SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_password FROM run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'new_role'$$) workers, pg_authid WHERE pg_authid.rolname = 'new_role';
|
||||||
|
|
||||||
|
ALTER ROLE new_role PASSWORD 'new_password';
|
||||||
|
SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_password, workers.result = pg_authid.rolpassword AS password_is_same FROM run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'new_role'$$) workers, pg_authid WHERE pg_authid.rolname = 'new_role';
|
||||||
|
|
||||||
|
ALTER ROLE new_role PASSWORD NULL;
|
||||||
|
SELECT workers.result AS worker_password, pg_authid.rolpassword AS coord_password FROM run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'new_role'$$) workers, pg_authid WHERE pg_authid.rolname = 'new_role';
|
||||||
|
|
||||||
|
RESET password_encryption;
|
||||||
|
DROP ROLE new_role;
|
||||||
DROP TABLE test_search_path;
|
DROP TABLE test_search_path;
|
||||||
DROP SCHEMA alter_role, ",CitUs,.TeeN!?", test_sp CASCADE;
|
DROP SCHEMA alter_role, ",CitUs,.TeeN!?", test_sp CASCADE;
|
||||||
|
|
|
@ -59,6 +59,24 @@ SELECT 1 FROM citus_rebalance_start();
|
||||||
SELECT rebalance_table_shards();
|
SELECT rebalance_table_shards();
|
||||||
SELECT citus_rebalance_wait();
|
SELECT citus_rebalance_wait();
|
||||||
|
|
||||||
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
|
||||||
|
-- make sure a non-super user can stop rebalancing
|
||||||
|
CREATE USER non_super_user_rebalance WITH LOGIN;
|
||||||
|
GRANT ALL ON SCHEMA background_rebalance TO non_super_user_rebalance;
|
||||||
|
|
||||||
|
SET ROLE non_super_user_rebalance;
|
||||||
|
|
||||||
|
CREATE TABLE non_super_user_t1 (a int PRIMARY KEY);
|
||||||
|
SELECT create_distributed_table('non_super_user_t1', 'a', shard_count => 4, colocate_with => 'none');
|
||||||
|
SELECT citus_move_shard_placement(85674008, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode => 'block_writes');
|
||||||
|
|
||||||
|
SELECT 1 FROM citus_rebalance_start();
|
||||||
|
SELECT citus_rebalance_stop();
|
||||||
|
|
||||||
|
RESET ROLE;
|
||||||
|
|
||||||
|
|
||||||
SET client_min_messages TO WARNING;
|
SET client_min_messages TO WARNING;
|
||||||
DROP SCHEMA background_rebalance CASCADE;
|
DROP SCHEMA background_rebalance CASCADE;
|
||||||
|
|
|
@ -185,6 +185,16 @@ ALTER TRIGGER "trigger\'name" ON "interesting!schema"."citus_local!_table" RENAM
|
||||||
SELECT * FROM citus_local_table_triggers
|
SELECT * FROM citus_local_table_triggers
|
||||||
WHERE tgname NOT LIKE 'RI_ConstraintTrigger%';
|
WHERE tgname NOT LIKE 'RI_ConstraintTrigger%';
|
||||||
|
|
||||||
|
-- ALTER TABLE ENABLE REPLICA trigger
|
||||||
|
ALTER TABLE "interesting!schema"."citus_local!_table" ENABLE REPLICA TRIGGER "trigger\'name22";
|
||||||
|
SELECT * FROM citus_local_table_triggers
|
||||||
|
WHERE tgname NOT LIKE 'RI_ConstraintTrigger%';
|
||||||
|
|
||||||
|
-- ALTER TABLE ENABLE ALWAYS trigger
|
||||||
|
ALTER TABLE "interesting!schema"."citus_local!_table" ENABLE ALWAYS TRIGGER "trigger\'name22";
|
||||||
|
SELECT * FROM citus_local_table_triggers
|
||||||
|
WHERE tgname NOT LIKE 'RI_ConstraintTrigger%';
|
||||||
|
|
||||||
-- ALTER TABLE DISABLE trigger
|
-- ALTER TABLE DISABLE trigger
|
||||||
ALTER TABLE "interesting!schema"."citus_local!_table" DISABLE TRIGGER "trigger\'name22";
|
ALTER TABLE "interesting!schema"."citus_local!_table" DISABLE TRIGGER "trigger\'name22";
|
||||||
SELECT * FROM citus_local_table_triggers
|
SELECT * FROM citus_local_table_triggers
|
||||||
|
|
|
@ -35,6 +35,12 @@ CREATE TABLE citus_local_table_1 (a int primary key);
|
||||||
-- this should fail as coordinator is removed from pg_dist_node
|
-- this should fail as coordinator is removed from pg_dist_node
|
||||||
SELECT citus_add_local_table_to_metadata('citus_local_table_1');
|
SELECT citus_add_local_table_to_metadata('citus_local_table_1');
|
||||||
|
|
||||||
|
-- This should also fail as coordinator is removed from pg_dist_node.
|
||||||
|
--
|
||||||
|
-- This is not a great place to test this but is one of those places that we
|
||||||
|
-- have workers in metadata but not the coordinator.
|
||||||
|
SELECT create_distributed_table_concurrently('citus_local_table_1', 'a');
|
||||||
|
|
||||||
-- let coordinator have citus local tables again for next tests
|
-- let coordinator have citus local tables again for next tests
|
||||||
set client_min_messages to ERROR;
|
set client_min_messages to ERROR;
|
||||||
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
|
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
|
||||||
|
|
|
@ -29,3 +29,17 @@ ALTER TABLE test_alter_table ALTER COLUMN b TYPE float USING (b::float + 0.5);
SELECT * FROM test_alter_table ORDER BY a;

DROP TABLE test_alter_table;

-- Make sure that the correct table options are used when rewriting the table.
-- This is reflected by the VACUUM VERBOSE output right after a rewrite showing
-- that all chunks are compressed with the configured compression algorithm
-- https://github.com/citusdata/citus/issues/5927
CREATE TABLE test(i int) USING columnar;
ALTER TABLE test SET (columnar.compression = lz4);
INSERT INTO test VALUES(1);
VACUUM VERBOSE test;

ALTER TABLE test ALTER COLUMN i TYPE int8;
VACUUM VERBOSE test;

DROP TABLE test;
@ -38,6 +38,12 @@ select create_distributed_table_concurrently('nocolo','x');
|
||||||
select create_distributed_table_concurrently('test','key', colocate_with := 'nocolo');
|
select create_distributed_table_concurrently('test','key', colocate_with := 'nocolo');
|
||||||
select create_distributed_table_concurrently('test','key', colocate_with := 'noexists');
|
select create_distributed_table_concurrently('test','key', colocate_with := 'noexists');
|
||||||
|
|
||||||
|
select citus_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', false);
|
||||||
|
select citus_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
|
||||||
|
select create_distributed_table_concurrently('test','key');
|
||||||
|
select citus_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', true);
|
||||||
|
select citus_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
|
||||||
|
|
||||||
-- use colocate_with "default"
|
-- use colocate_with "default"
|
||||||
select create_distributed_table_concurrently('test','key', shard_count := 11);
|
select create_distributed_table_concurrently('test','key', shard_count := 11);
|
||||||
|
|
||||||
|
|
|
@ -246,19 +246,6 @@ $Q$);
|
||||||
|
|
||||||
\set VERBOSITY default
|
\set VERBOSITY default
|
||||||
|
|
||||||
-- enable_group_by_reordering is a new GUC introduced in PG15
|
|
||||||
-- it does some optimization of the order of group by keys which results
|
|
||||||
-- in a different explain output plan between PG13/14 and PG15
|
|
||||||
-- Hence we set that GUC to off.
|
|
||||||
SHOW server_version \gset
|
|
||||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
|
||||||
\gset
|
|
||||||
\if :server_version_ge_15
|
|
||||||
SET enable_group_by_reordering TO off;
|
|
||||||
\endif
|
|
||||||
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM SET enable_group_by_reordering TO off;$$);
|
|
||||||
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
|
|
||||||
|
|
||||||
EXPLAIN (COSTS OFF) WITH cte_1 AS NOT MATERIALIZED (SELECT * FROM test_table)
|
EXPLAIN (COSTS OFF) WITH cte_1 AS NOT MATERIALIZED (SELECT * FROM test_table)
|
||||||
SELECT
|
SELECT
|
||||||
count(*)
|
count(*)
|
||||||
|
@ -268,13 +255,6 @@ FROM
|
||||||
cte_1 as second_entry
|
cte_1 as second_entry
|
||||||
USING (key);
|
USING (key);
|
||||||
|
|
||||||
\if :server_version_ge_15
|
|
||||||
RESET enable_group_by_reordering;
|
|
||||||
\endif
|
|
||||||
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM RESET enable_group_by_reordering;$$);
|
|
||||||
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
|
|
||||||
|
|
||||||
|
|
||||||
-- ctes with volatile functions are not
|
-- ctes with volatile functions are not
|
||||||
-- inlined
|
-- inlined
|
||||||
WITH cte_1 AS (SELECT *, random() FROM test_table)
|
WITH cte_1 AS (SELECT *, random() FROM test_table)
|
||||||
|
|
|
@ -473,7 +473,88 @@ SELECT * FROM distributed_table_change;
|
||||||
SELECT tgrelid::regclass::text, tgname FROM pg_trigger WHERE tgname like 'insert_99_trigger%' ORDER BY 1,2;
|
SELECT tgrelid::regclass::text, tgname FROM pg_trigger WHERE tgname like 'insert_99_trigger%' ORDER BY 1,2;
|
||||||
SELECT run_command_on_workers($$SELECT count(*) FROM pg_trigger WHERE tgname like 'insert_99_trigger%'$$);
|
SELECT run_command_on_workers($$SELECT count(*) FROM pg_trigger WHERE tgname like 'insert_99_trigger%'$$);
|
||||||
|
|
||||||
RESET client_min_messages;
|
CREATE TABLE "dist_\'table"(a int);
|
||||||
|
|
||||||
|
CREATE FUNCTION trigger_func()
|
||||||
|
RETURNS trigger
|
||||||
|
LANGUAGE plpgsql
|
||||||
|
AS $function$
|
||||||
|
BEGIN
|
||||||
|
RETURN NULL;
|
||||||
|
END;
|
||||||
|
$function$;
|
||||||
|
|
||||||
|
CREATE TRIGGER default_mode_trigger
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
|
||||||
|
CREATE TRIGGER "disabled_trigger\'"
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
|
||||||
|
ALTER TABLE "dist_\'table" DISABLE trigger "disabled_trigger\'";
|
||||||
|
|
||||||
|
CREATE TRIGGER replica_trigger
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
|
||||||
|
ALTER TABLE "dist_\'table" ENABLE REPLICA trigger replica_trigger;
|
||||||
|
|
||||||
|
CREATE TRIGGER always_enabled_trigger
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
|
||||||
|
ALTER TABLE "dist_\'table" ENABLE ALWAYS trigger always_enabled_trigger;
|
||||||
|
|
||||||
|
CREATE TRIGGER noop_enabled_trigger
|
||||||
|
AFTER UPDATE OR DELETE ON "dist_\'table"
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION trigger_func();
|
||||||
|
|
||||||
|
ALTER TABLE "dist_\'table" ENABLE trigger noop_enabled_trigger;
|
||||||
|
|
||||||
|
SELECT create_distributed_table('dist_\''table', 'a');
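-- Editorial note (not part of the diff): the pg_trigger.tgenabled values checked below are
-- 'O' (fires in origin/local replication role, the default), 'D' (disabled), 'R' (fires in
-- replica role only) and 'A' (fires always), matching the ENABLE/DISABLE variants above.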
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'O') FROM pg_trigger WHERE tgname LIKE 'default_mode_trigger%';
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'O') FROM pg_trigger WHERE tgname LIKE 'default_mode_trigger%'$$);
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'D') FROM pg_trigger WHERE tgname LIKE 'disabled_trigger%';
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'D') FROM pg_trigger WHERE tgname LIKE 'disabled_trigger%'$$);
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'R') FROM pg_trigger WHERE tgname LIKE 'replica_trigger%';
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'R') FROM pg_trigger WHERE tgname LIKE 'replica_trigger%'$$);
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'A') FROM pg_trigger WHERE tgname LIKE 'always_enabled_trigger%';
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'A') FROM pg_trigger WHERE tgname LIKE 'always_enabled_trigger%'$$);
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'O') FROM pg_trigger WHERE tgname LIKE 'noop_enabled_trigger%';
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'O') FROM pg_trigger WHERE tgname LIKE 'noop_enabled_trigger%'$$);
|
||||||
|
|
||||||
|
CREATE TABLE citus_local(a int);
|
||||||
|
|
||||||
|
CREATE FUNCTION citus_local_trig_func()
|
||||||
|
RETURNS trigger
|
||||||
|
LANGUAGE plpgsql
|
||||||
|
AS $function$
|
||||||
|
BEGIN
|
||||||
|
RETURN NULL;
|
||||||
|
END;
|
||||||
|
$function$;
|
||||||
|
|
||||||
|
CREATE TRIGGER citus_local_trig
|
||||||
|
AFTER UPDATE OR DELETE ON citus_local
|
||||||
|
FOR STATEMENT EXECUTE FUNCTION citus_local_trig_func();
|
||||||
|
|
||||||
|
-- make sure that trigger is initially not disabled
|
||||||
|
SELECT tgenabled = 'D' FROM pg_trigger WHERE tgname LIKE 'citus_local_trig%';
|
||||||
|
|
||||||
|
ALTER TABLE citus_local DISABLE trigger citus_local_trig;
|
||||||
|
|
||||||
|
SELECT citus_add_local_table_to_metadata('citus_local');
|
||||||
|
|
||||||
|
SELECT bool_and(tgenabled = 'D') FROM pg_trigger WHERE tgname LIKE 'citus_local_trig%';
|
||||||
|
SELECT run_command_on_workers($$SELECT bool_and(tgenabled = 'D') FROM pg_trigger WHERE tgname LIKE 'citus_local_trig%'$$);
|
||||||
|
|
||||||
|
SET client_min_messages TO ERROR;
|
||||||
RESET citus.enable_unsafe_triggers;
|
RESET citus.enable_unsafe_triggers;
|
||||||
SELECT run_command_on_workers('ALTER SYSTEM RESET citus.enable_unsafe_triggers;');
|
SELECT run_command_on_workers('ALTER SYSTEM RESET citus.enable_unsafe_triggers;');
|
||||||
SELECT run_command_on_workers('SELECT pg_reload_conf();');
|
SELECT run_command_on_workers('SELECT pg_reload_conf();');
|
||||||
|
|
|
@ -32,8 +32,14 @@ INSERT INTO t SELECT x, x+1, MD5(random()::text) FROM generate_series(1,100000)
|
||||||
-- Initial shard placements
|
-- Initial shard placements
|
||||||
SELECT * FROM shards_in_workers;
|
SELECT * FROM shards_in_workers;
|
||||||
|
|
||||||
-- failure on creating the subscription
|
-- Failure on creating the subscription
|
||||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()');
|
-- Failing exactly on CREATE SUBSCRIPTION causes a flaky test where we fail with either:
|
||||||
|
-- 1) ERROR: connection to the remote node localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist
|
||||||
|
-- another command is already in progress
|
||||||
|
-- 2) ERROR: connection to the remote node localhost:xxxxx failed with the following error: another command is already in progress
|
||||||
|
-- Instead, fail on the next step (ALTER SUBSCRIPTION), which is also logically required as part of the overall CREATE SUBSCRIPTION operation.
|
||||||
|
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="ALTER SUBSCRIPTION").kill()');
|
||||||
SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port);
|
SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
-- Verify that the shard is not moved and the number of rows are still 100k
|
-- Verify that the shard is not moved and the number of rows are still 100k
|
||||||
|
|
|
@ -254,20 +254,6 @@ FROM
|
||||||
user_id) AS subquery;
|
user_id) AS subquery;
|
||||||
|
|
||||||
-- Union and left join subquery pushdown
|
-- Union and left join subquery pushdown
|
||||||
|
|
||||||
-- enable_group_by_reordering is a new GUC introduced in PG15
|
|
||||||
-- it does some optimization of the order of group by keys which results
|
|
||||||
-- in a different explain output plan between PG13/14 and PG15
|
|
||||||
-- Hence we set that GUC to off.
|
|
||||||
SHOW server_version \gset
|
|
||||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
|
||||||
\gset
|
|
||||||
\if :server_version_ge_15
|
|
||||||
SET enable_group_by_reordering TO off;
|
|
||||||
\endif
|
|
||||||
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM SET enable_group_by_reordering TO off;$$);
|
|
||||||
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
|
|
||||||
|
|
||||||
EXPLAIN (COSTS OFF)
|
EXPLAIN (COSTS OFF)
|
||||||
SELECT
|
SELECT
|
||||||
avg(array_length(events, 1)) AS event_average,
|
avg(array_length(events, 1)) AS event_average,
|
||||||
|
@ -403,12 +389,6 @@ GROUP BY
|
||||||
ORDER BY
|
ORDER BY
|
||||||
count_pay;
|
count_pay;
|
||||||
|
|
||||||
\if :server_version_ge_15
|
|
||||||
RESET enable_group_by_reordering;
|
|
||||||
\endif
|
|
||||||
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM RESET enable_group_by_reordering;$$);
|
|
||||||
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
|
|
||||||
|
|
||||||
-- Lateral join subquery pushdown
|
-- Lateral join subquery pushdown
|
||||||
-- set subquery_pushdown due to limit in the query
|
-- set subquery_pushdown due to limit in the query
|
||||||
SET citus.subquery_pushdown to ON;
|
SET citus.subquery_pushdown to ON;
|
||||||
|
|
|
@ -400,13 +400,21 @@ SELECT * FROM multi_extension.print_extension_changes();
|
||||||
ALTER EXTENSION citus UPDATE TO '10.2-4';
|
ALTER EXTENSION citus UPDATE TO '10.2-4';
|
||||||
SELECT * FROM multi_extension.print_extension_changes();
|
SELECT * FROM multi_extension.print_extension_changes();
|
||||||
|
|
||||||
-- Snapshot of state at 10.2-5
|
-- There was a bug when downgrading to 10.2-2 from 10.2-4
|
||||||
ALTER EXTENSION citus UPDATE TO '10.2-5';
|
-- Test that we do not have any issues with this particular downgrade
|
||||||
|
ALTER EXTENSION citus UPDATE TO '10.2-2';
|
||||||
|
ALTER EXTENSION citus UPDATE TO '10.2-4';
|
||||||
SELECT * FROM multi_extension.print_extension_changes();
|
SELECT * FROM multi_extension.print_extension_changes();
|
||||||
|
|
||||||
-- Test downgrade to 10.2-4 from 10.2-5
|
-- Test downgrade to 10.2-4 from 10.2-5
|
||||||
ALTER EXTENSION citus UPDATE TO '10.2-4';
|
|
||||||
ALTER EXTENSION citus UPDATE TO '10.2-5';
|
ALTER EXTENSION citus UPDATE TO '10.2-5';
|
||||||
|
ALTER EXTENSION citus UPDATE TO '10.2-4';
|
||||||
|
-- Should be empty result since upgrade+downgrade should be a no-op
|
||||||
|
SELECT * FROM multi_extension.print_extension_changes();
|
||||||
|
|
||||||
|
-- Snapshot of state at 10.2-5
|
||||||
|
ALTER EXTENSION citus UPDATE TO '10.2-5';
|
||||||
|
SELECT * FROM multi_extension.print_extension_changes();
|
||||||
|
|
||||||
-- Make sure that we defined dependencies from all rel objects (tables,
|
-- Make sure that we defined dependencies from all rel objects (tables,
|
||||||
-- indexes, sequences ..) to columnar table access method ...
|
-- indexes, sequences ..) to columnar table access method ...
|
||||||
|
@@ -795,5 +803,39 @@ FROM test.maintenance_worker();
 -- confirm that there is only one maintenance daemon
 SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
+
+-- confirm that we can create a distributed table concurrently on an empty node
+DROP EXTENSION citus;
+CREATE EXTENSION citus;
+CREATE TABLE test (x int, y int);
+INSERT INTO test VALUES (1,2);
+SET citus.shard_replication_factor TO 1;
+SET citus.defer_drop_after_shard_split TO off;
+SELECT create_distributed_table_concurrently('test','x');
+DROP TABLE test;
+TRUNCATE pg_dist_node;
+
+-- confirm that we can create a distributed table on an empty node
+CREATE TABLE test (x int, y int);
+INSERT INTO test VALUES (1,2);
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('test','x');
+DROP TABLE test;
+TRUNCATE pg_dist_node;
+
+-- confirm that we can create a reference table on an empty node
+CREATE TABLE test (x int, y int);
+INSERT INTO test VALUES (1,2);
+SELECT create_reference_table('test');
+DROP TABLE test;
+TRUNCATE pg_dist_node;
+
+-- confirm that we can create a local table on an empty node
+CREATE TABLE test (x int, y int);
+INSERT INTO test VALUES (1,2);
+SELECT citus_add_local_table_to_metadata('test');
+DROP TABLE test;
+DROP EXTENSION citus;
+CREATE EXTENSION citus;
+
 DROP TABLE version_mismatch_table;
 DROP SCHEMA multi_extension;
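The "empty node" scenarios above rely on TRUNCATE pg_dist_node wiping the node metadata, so the coordinator ends up with no registered workers. A minimal sanity check for that precondition (a sketch, not part of the test) could be:

    SELECT count(*) FROM pg_dist_node;  -- expected to be 0 before each empty-node scenario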
@@ -696,5 +696,87 @@ DROP TABLE dropfkeytest1 CASCADE;
 -- this should work
 SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
+
+CREATE TABLE set_on_default_test_referenced(
+  col_1 int, col_2 int, col_3 int, col_4 int,
+  unique (col_1, col_3)
+);
+SELECT create_reference_table('set_on_default_test_referenced');
+
+CREATE TABLE set_on_default_test_referencing(
+  col_1 int, col_2 int, col_3 serial, col_4 int,
+  FOREIGN KEY(col_1, col_3)
+    REFERENCES set_on_default_test_referenced(col_1, col_3)
+    ON UPDATE SET DEFAULT
+);
+
+-- from distributed / reference to reference, fkey exists before calling the UDFs
+SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
+SELECT create_reference_table('set_on_default_test_referencing');
+
+DROP TABLE set_on_default_test_referencing;
+CREATE TABLE set_on_default_test_referencing(
+  col_1 serial, col_2 int, col_3 int, col_4 int
+);
+SELECT create_reference_table('set_on_default_test_referencing');
+
+-- from reference to reference, fkey doesn't exist before calling the UDFs
+ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
+  FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
+  ON DELETE SET DEFAULT;
+
+DROP TABLE set_on_default_test_referencing;
+CREATE TABLE set_on_default_test_referencing(
+  col_1 int, col_2 serial, col_3 int, col_4 bigserial
+);
+SELECT create_reference_table('set_on_default_test_referencing');
+
+-- ok since referencing columns are not based on sequences
+ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
+  FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
+  ON DELETE SET DEFAULT;
+
+DROP TABLE set_on_default_test_referencing;
+
+CREATE SEQUENCE test_sequence;
+CREATE TABLE set_on_default_test_referencing(
+  col_1 int, col_2 int, col_3 int DEFAULT nextval('test_sequence'), col_4 int
+);
+SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
+
+-- from distributed to reference, fkey doesn't exist before calling the UDFs
+ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
+  FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
+  ON DELETE SET DEFAULT ON UPDATE SET DEFAULT;
+
+DROP TABLE set_on_default_test_referenced;
+CREATE TABLE set_on_default_test_referenced(
+  col_1 int, col_2 int, col_3 int, col_4 int,
+  unique (col_1, col_3)
+);
+SELECT create_distributed_table('set_on_default_test_referenced', 'col_1');
+
+DROP TABLE set_on_default_test_referencing;
+CREATE TABLE set_on_default_test_referencing(
+  col_1 bigserial, col_2 int, col_3 int DEFAULT nextval('test_sequence'), col_4 int,
+  FOREIGN KEY(col_1, col_3)
+    REFERENCES set_on_default_test_referenced(col_1, col_3)
+    ON DELETE SET DEFAULT
+);
+
+-- from distributed to distributed, fkey exists before calling the UDFs
+SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
+
+DROP TABLE set_on_default_test_referencing;
+CREATE TABLE set_on_default_test_referencing(
+  col_1 int DEFAULT nextval('test_sequence'), col_2 int, col_3 int, col_4 int
+);
+SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
+
+-- from distributed to distributed, fkey doesn't exist before calling the UDFs
+ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
+  FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
+  ON DELETE SET DEFAULT;
+
 -- we no longer need those tables
-DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2;
+DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2,
+  set_on_default_test_referenced, set_on_default_test_referencing;
@@ -2002,6 +2002,30 @@ SELECT tablename, indexname FROM pg_indexes
 WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%' ORDER BY 1, 2;

 \c - - - :master_port
+SET search_path TO partitioning_schema;
+
+-- create parent table
+CREATE TABLE stxdinp(i int, a int, b int) PARTITION BY RANGE (i);
+
+-- create partition
+CREATE TABLE stxdinp1 PARTITION OF stxdinp FOR VALUES FROM (1) TO (100);
+
+-- populate table
+INSERT INTO stxdinp SELECT 1, a/100, a/100 FROM generate_series(1, 999) a;
+
+-- create extended statistics
+CREATE STATISTICS stxdinp ON a, b FROM stxdinp;
+
+-- distribute parent table
+SELECT create_distributed_table('stxdinp', 'i');
+
+-- run select query, works fine
+SELECT a, b FROM stxdinp GROUP BY 1, 2;
+
+-- partitions are processed recursively for PG15+
+VACUUM ANALYZE stxdinp;
+SELECT a, b FROM stxdinp GROUP BY 1, 2;
+
 DROP SCHEMA partitioning_schema CASCADE;
 RESET search_path;
 DROP TABLE IF EXISTS
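To list the extended-statistics objects created above and the relations they are attached to (a sketch for manual inspection, not part of the test), one could run:

    SELECT stxname, stxrelid::regclass FROM pg_statistic_ext ORDER BY 1;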
@@ -3,9 +3,6 @@
 --
 -- Tests select distinct, and select distinct on features.
 --
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

 ANALYZE lineitem_hash_part;

 -- function calls are supported
@@ -213,6 +213,47 @@ WHEN MATCHED THEN DELETE;
 -- now, both distributed, not works
 SELECT undistribute_table('tbl1');
 SELECT undistribute_table('tbl2');
+
+-- Make sure that we allow foreign key columns on local tables added to
+-- metadata to have SET NULL/DEFAULT on column basis.
+
+CREATE TABLE PKTABLE_local (tid int, id int, PRIMARY KEY (tid, id));
+CREATE TABLE FKTABLE_local (
+  tid int, id int,
+  fk_id_del_set_null int,
+  fk_id_del_set_default int DEFAULT 0,
+  FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES PKTABLE_local ON DELETE SET NULL (fk_id_del_set_null),
+  FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES PKTABLE_local ON DELETE SET DEFAULT (fk_id_del_set_default)
+);
+
+SELECT citus_add_local_table_to_metadata('FKTABLE_local', cascade_via_foreign_keys=>true);
+
+-- show that the definition is expected
+SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'FKTABLE_local'::regclass::oid ORDER BY oid;
+
+\c - - - :worker_1_port
+
+SET search_path TO pg15;
+
+-- show that the definition is expected on the worker as well
+SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'FKTABLE_local'::regclass::oid ORDER BY oid;
+
+-- also, make sure that it works as expected
+INSERT INTO PKTABLE_local VALUES (1, 0), (1, 1), (1, 2);
+INSERT INTO FKTABLE_local VALUES
+  (1, 1, 1, NULL),
+  (1, 2, NULL, 2);
+DELETE FROM PKTABLE_local WHERE id = 1 OR id = 2;
+SELECT * FROM FKTABLE_local ORDER BY id;
+
+\c - - - :master_port
+
+SET search_path TO pg15;
+
+SET client_min_messages to ERROR;
+DROP TABLE FKTABLE_local, PKTABLE_local;
+RESET client_min_messages;
+
 SELECT 1 FROM citus_remove_node('localhost', :master_port);

 SELECT create_distributed_table('tbl1', 'x');
@@ -540,8 +581,304 @@ CLUSTER sale_repl_factor_1 USING sale_repl_factor_1_pk;
 -- verify that we can still cluster the partition tables now since replication factor is 1
 CLUSTER sale_newyork_repl_factor_1 USING sale_newyork_repl_factor_1_pkey;
+
+create table reservations ( room_id integer not null, booked_during daterange );
+insert into reservations values
+  -- 1: has a meets and a gap
+  (1, daterange('2018-07-01', '2018-07-07')),
+  (1, daterange('2018-07-07', '2018-07-14')),
+  (1, daterange('2018-07-20', '2018-07-22')),
+  -- 2: just a single row
+  (2, daterange('2018-07-01', '2018-07-03')),
+  -- 3: one null range
+  (3, NULL),
+  -- 4: two null ranges
+  (4, NULL),
+  (4, NULL),
+  -- 5: a null range and a non-null range
+  (5, NULL),
+  (5, daterange('2018-07-01', '2018-07-03')),
+  -- 6: has overlap
+  (6, daterange('2018-07-01', '2018-07-07')),
+  (6, daterange('2018-07-05', '2018-07-10')),
+  -- 7: two ranges that meet: no gap or overlap
+  (7, daterange('2018-07-01', '2018-07-07')),
+  (7, daterange('2018-07-07', '2018-07-14')),
+  -- 8: an empty range
+  (8, 'empty'::daterange);
+SELECT create_distributed_table('reservations', 'room_id');
+
+-- should be fine to pushdown range_agg
+SELECT room_id, range_agg(booked_during ORDER BY booked_during)
+FROM reservations
+GROUP BY room_id
+ORDER BY room_id;
+
+-- should be fine to apply range_agg on the coordinator
+SELECT room_id + 1, range_agg(booked_during ORDER BY booked_during)
+FROM reservations
+GROUP BY room_id + 1
+ORDER BY room_id + 1;
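The difference between the two queries above is only where the aggregate runs: grouping by the distribution column room_id lets range_agg be pushed down to the workers, while grouping by the expression room_id + 1 makes Citus combine results on the coordinator, as the test comments note. A hedged way to see which plan you get (a sketch, not part of the test) is:

    EXPLAIN (VERBOSE, COSTS OFF)
    SELECT room_id, range_agg(booked_during ORDER BY booked_during)
    FROM reservations GROUP BY room_id;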
+
+-- min() and max() for xid8
+create table xid8_t1 (x xid8, y int);
+insert into xid8_t1 values ('0', 1), ('010', 2), ('42', 3), ('0xffffffffffffffff', 4), ('-1', 5);
+SELECT create_distributed_table('xid8_t1', 'x');
+select min(x), max(x) from xid8_t1 ORDER BY 1,2;
+select min(x), max(x) from xid8_t1 GROUP BY x ORDER BY 1,2;
+select min(x), max(x) from xid8_t1 GROUP BY y ORDER BY 1,2;
+
+--
+-- PG15 introduces security invoker views
+-- Citus supports these views because permissions in the shards
+-- are already checked for the view invoker
+--
+
+-- create a distributed table and populate it
+CREATE TABLE events (tenant_id int, event_id int, descr text);
+SELECT create_distributed_table('events','tenant_id');
+INSERT INTO events VALUES (1, 1, 'push');
+INSERT INTO events VALUES (2, 2, 'push');
+
+-- create a security invoker view with underlying distributed table
+-- the view will be distributed with security_invoker option as well
+CREATE VIEW sec_invoker_view WITH (security_invoker=true) AS SELECT * FROM events;
+
+\c - - - :worker_1_port
+SELECT relname, reloptions FROM pg_class
+  WHERE relname = 'sec_invoker_view' AND relnamespace = 'pg15'::regnamespace;
+
+\c - - - :master_port
+SET search_path TO pg15;
+
+-- test altering the security_invoker flag
+ALTER VIEW sec_invoker_view SET (security_invoker = false);
+
+\c - - - :worker_1_port
+SELECT relname, reloptions FROM pg_class
+  WHERE relname = 'sec_invoker_view' AND relnamespace = 'pg15'::regnamespace;
+
+\c - - - :master_port
+SET search_path TO pg15;
+
+ALTER VIEW sec_invoker_view SET (security_invoker = true);
+
+-- create a new user but don't give select permission to events table
+-- only give select permission to the view
+CREATE ROLE rls_tenant_1 WITH LOGIN;
+GRANT USAGE ON SCHEMA pg15 TO rls_tenant_1;
+GRANT SELECT ON sec_invoker_view TO rls_tenant_1;
+
+-- this user shouldn't be able to query the view
+-- because the view is security invoker
+-- which means it will check the invoker's rights
+-- against the view's underlying tables
+SET ROLE rls_tenant_1;
+SELECT * FROM sec_invoker_view ORDER BY event_id;
+RESET ROLE;
+
+-- now grant select on the underlying distributed table
+-- and try again
+-- now it should work!
+GRANT SELECT ON TABLE events TO rls_tenant_1;
+SET ROLE rls_tenant_1;
+SELECT * FROM sec_invoker_view ORDER BY event_id;
+RESET ROLE;
+
+-- Enable row level security
+ALTER TABLE events ENABLE ROW LEVEL SECURITY;
+
+-- Create policy for tenants to read access their own rows
+CREATE POLICY user_mod ON events
+  FOR SELECT TO rls_tenant_1
+  USING (current_user = 'rls_tenant_' || tenant_id::text);
+
+-- all rows should be visible because we are querying with
+-- the table owner user now
+SELECT * FROM sec_invoker_view ORDER BY event_id;
+
+-- Switch user that has been granted rights,
+-- should be able to see rows that the policy allows
+SET ROLE rls_tenant_1;
+SELECT * FROM sec_invoker_view ORDER BY event_id;
+RESET ROLE;
+
+-- ordinary view on top of security invoker view permissions
+-- ordinary means security definer view
+-- The PG expected behavior is that this doesn't change anything!!!
+-- Can't escape security invoker views by defining a security definer view on top of it!
+CREATE VIEW sec_definer_view AS SELECT * FROM sec_invoker_view ORDER BY event_id;
+
+\c - - - :worker_1_port
+SELECT relname, reloptions FROM pg_class
+  WHERE relname = 'sec_definer_view' AND relnamespace = 'pg15'::regnamespace;
+
+\c - - - :master_port
+SET search_path TO pg15;
+
+CREATE ROLE rls_tenant_2 WITH LOGIN;
+GRANT USAGE ON SCHEMA pg15 TO rls_tenant_2;
+GRANT SELECT ON sec_definer_view TO rls_tenant_2;
+
+-- it doesn't matter that the parent view is security definer
+-- still the security invoker view will check the invoker's permissions
+-- and will not allow rls_tenant_2 to query the view
+SET ROLE rls_tenant_2;
+SELECT * FROM sec_definer_view ORDER BY event_id;
+RESET ROLE;
+
+-- grant select rights to rls_tenant_2
+GRANT SELECT ON TABLE events TO rls_tenant_2;
+
+-- we still have row level security so rls_tenant_2
+-- will be able to query but won't be able to see anything
+SET ROLE rls_tenant_2;
+SELECT * FROM sec_definer_view ORDER BY event_id;
+RESET ROLE;
+
+-- give some rights to rls_tenant_2
+CREATE POLICY user_mod_1 ON events
+  FOR SELECT TO rls_tenant_2
+  USING (current_user = 'rls_tenant_' || tenant_id::text);
+
+-- Row level security will be applied as well! We are safe!
+SET ROLE rls_tenant_2;
+SELECT * FROM sec_definer_view ORDER BY event_id;
+RESET ROLE;
+
+-- no need to test updatable views because they are currently not
+-- supported in Citus when the query view contains citus tables
+UPDATE sec_invoker_view SET event_id = 5;
+
+--
+-- Not allow ON DELETE/UPDATE SET DEFAULT actions on columns that
+-- default to sequences
+-- Adding a special test here since in PG15 we can
+-- specify column list for foreign key ON DELETE SET actions
+-- Relevant PG commit:
+-- d6f96ed94e73052f99a2e545ed17a8b2fdc1fb8a
+--
+
+CREATE TABLE set_on_default_test_referenced(
+  col_1 int, col_2 int, col_3 int, col_4 int,
+  unique (col_1, col_3)
+);
+SELECT create_reference_table('set_on_default_test_referenced');
+
+CREATE TABLE set_on_default_test_referencing(
+  col_1 int, col_2 int, col_3 serial, col_4 int,
+  FOREIGN KEY(col_1, col_3)
+    REFERENCES set_on_default_test_referenced(col_1, col_3)
+    ON DELETE SET DEFAULT (col_1)
+    ON UPDATE SET DEFAULT
+);
+
+-- should error since col_3 defaults to a sequence
+SELECT create_reference_table('set_on_default_test_referencing');
+
+DROP TABLE set_on_default_test_referencing;
+CREATE TABLE set_on_default_test_referencing(
+  col_1 int, col_2 int, col_3 serial, col_4 int,
+  FOREIGN KEY(col_1, col_3)
+    REFERENCES set_on_default_test_referenced(col_1, col_3)
+    ON DELETE SET DEFAULT (col_1)
+);
+
+-- should not error since this doesn't set any sequence based columns to default
+SELECT create_reference_table('set_on_default_test_referencing');
+
+INSERT INTO set_on_default_test_referenced (col_1, col_3) VALUES (1, 1);
+INSERT INTO set_on_default_test_referencing (col_1, col_3) VALUES (1, 1);
+DELETE FROM set_on_default_test_referenced;
+
+SELECT * FROM set_on_default_test_referencing ORDER BY 1,2;
+
+DROP TABLE set_on_default_test_referencing;
+
+SET client_min_messages to ERROR;
+SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
+RESET client_min_messages;
+
+-- should error since col_3 defaults to a sequence
+CREATE TABLE set_on_default_test_referencing(
+  col_1 int, col_2 int, col_3 serial, col_4 int,
+  FOREIGN KEY(col_1, col_3)
+    REFERENCES set_on_default_test_referenced(col_1, col_3)
+    ON DELETE SET DEFAULT (col_3)
+);
+
+--
+-- PG15 has suppressed some casts on constants when querying foreign tables
+-- For example, we can use text to represent a type that's an enum on the remote side
+-- A comparison on such a column will get shipped as "var = 'foo'::text"
+-- But there's no enum = text operator on the remote side
+-- If we leave off the explicit cast, the comparison will work
+-- Test we behave in the same way with a Citus foreign table
+-- Reminder: foreign tables cannot be distributed/reference, can only be Citus local
+-- Relevant PG commit:
+-- f8abb0f5e114d8c309239f0faa277b97f696d829
+--
+
+\set VERBOSITY terse
+SET citus.next_shard_id TO 960200;
+SET citus.enable_local_execution TO ON;
+-- add the foreign table to metadata with the guc
+SET citus.use_citus_managed_tables TO ON;
+
+CREATE TYPE user_enum AS ENUM ('foo', 'bar', 'buz');
+
+CREATE TABLE foreign_table_test (c0 integer NOT NULL, c1 user_enum);
+INSERT INTO foreign_table_test VALUES (1, 'foo');
+
+CREATE EXTENSION postgres_fdw;
+
+CREATE SERVER foreign_server
+  FOREIGN DATA WRAPPER postgres_fdw
+  OPTIONS (host 'localhost', port :'master_port', dbname 'regression');
+
+CREATE USER MAPPING FOR CURRENT_USER
+  SERVER foreign_server
+  OPTIONS (user 'postgres');
+
+CREATE FOREIGN TABLE foreign_table (
+  c0 integer NOT NULL,
+  c1 text
+)
+  SERVER foreign_server
+  OPTIONS (schema_name 'pg15', table_name 'foreign_table_test');
+
+-- check that the foreign table is a citus local table
+SELECT partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid = 'foreign_table'::regclass ORDER BY logicalrelid;
+
+-- same tests as in the relevant PG commit
+-- Check that Remote SQL in the EXPLAIN doesn't contain casting
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM foreign_table WHERE c1 = 'foo' LIMIT 1;
+SELECT * FROM foreign_table WHERE c1 = 'foo' LIMIT 1;
+
+-- Check that Remote SQL in the EXPLAIN doesn't contain casting
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM foreign_table WHERE 'foo' = c1 LIMIT 1;
+SELECT * FROM foreign_table WHERE 'foo' = c1 LIMIT 1;
+
+-- we declared c1 to be text locally, but it's still the same type on
+-- the remote which will balk if we try to do anything incompatible
+-- with that remote type
+SELECT * FROM foreign_table WHERE c1 LIKE 'foo' LIMIT 1; -- ERROR
+SELECT * FROM foreign_table WHERE c1::text LIKE 'foo' LIMIT 1; -- ERROR; cast not pushed down
+
+-- Clean up foreign table test
+RESET citus.use_citus_managed_tables;
+SELECT undistribute_table('foreign_table');
+SELECT undistribute_table('foreign_table_test');
+SELECT 1 FROM citus_remove_node('localhost', :master_port);
+DROP SERVER foreign_server CASCADE;
+
+-- PG15 now supports specifying oid on CREATE DATABASE
+-- verify that we print meaningful notice messages.
+CREATE DATABASE db_with_oid OID 987654;
+DROP DATABASE db_with_oid;
+
 -- Clean up
-RESET citus.shard_replication_factor;
 \set VERBOSITY terse
 SET client_min_messages TO ERROR;
 DROP SCHEMA pg15 CASCADE;
@@ -0,0 +1,236 @@
+--
+-- PG15 jsonpath tests
+-- Relevant pg commit: e26114c817b610424010cfbe91a743f591246ff1
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
+\gset
+\if :server_version_ge_15
+\else
+\q
+\endif
+
+CREATE SCHEMA jsonpath;
+SET search_path TO jsonpath;
+
+CREATE TABLE jsonpath_test (id serial, sample text);
+SELECT create_distributed_table('jsonpath_test', 'id');
+\COPY jsonpath_test(sample) FROM STDIN
+$
+strict $
+lax $
+$.a
+$.a.v
+$.a.*
+$.*[*]
+$.a[*]
+$.a[*][*]
+$[*]
+$[0]
+$[*][0]
+$[*].a
+$[*][0].a.b
+$.a.**.b
+$.a.**{2}.b
+$.a.**{2 to 2}.b
+$.a.**{2 to 5}.b
+$.a.**{0 to 5}.b
+$.a.**{5 to last}.b
+$.a.**{last}.b
+$.a.**{last to 5}.b
+$+1
+$-1
+$--+1
+$.a/+-1
+1 * 2 + 4 % -3 != false
+$.g ? ($.a == 1)
+$.g ? (@ == 1)
+$.g ? (@.a == 1)
+$.g ? (@.a == 1 || @.a == 4)
+$.g ? (@.a == 1 && @.a == 4)
+$.g ? (@.a == 1 || @.a == 4 && @.b == 7)
+$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)
+$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)
+$.g ? (@.x >= @[*]?(@.a > "abc"))
+$.g ? ((@.x >= 123 || @.a == 4) is unknown)
+$.g ? (exists (@.x))
+$.g ? (exists (@.x ? (@ == 14)))
+$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))
+$.g ? (+@.x >= +-(+@.a + 2))
+$a
+$a.b
+$a[*]
+$.g ? (@.zip == $zip)
+$.a[1,2, 3 to 16]
+$.a[$a + 1, ($b[*]) to -($[0] * 2)]
+$.a[$.a.size() - 3]
+"last"
+$.last
+$[last]
+$[$[0] ? (last > 0)]
+null.type()
+(1).type()
+1.2.type()
+"aaa".type()
+true.type()
+$.double().floor().ceiling().abs()
+$.keyvalue().key
+$.datetime()
+$.datetime("datetime template")
+$ ? (@ starts with "abc")
+$ ? (@ starts with $var)
+$ ? (@ like_regex "pattern")
+$ ? (@ like_regex "pattern" flag "")
+$ ? (@ like_regex "pattern" flag "i")
+$ ? (@ like_regex "pattern" flag "is")
+$ ? (@ like_regex "pattern" flag "isim")
+$ ? (@ like_regex "pattern" flag "q")
+$ ? (@ like_regex "pattern" flag "iq")
+$ ? (@ like_regex "pattern" flag "smixq")
+$ < 1
+($ < 1) || $.a.b <= $x
+($).a.b
+($.a.b).c.d
+($.a.b + -$.x.y).c.d
+(-+$.a.b).c.d
+1 + ($.a.b + 2).c.d
+1 + ($.a.b > 2).c.d
+($)
+(($))
+((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))
+$ ? (@.a < 1)
+$ ? (@.a < -1)
+$ ? (@.a < +1)
+$ ? (@.a < .1)
+$ ? (@.a < -.1)
+$ ? (@.a < +.1)
+$ ? (@.a < 0.1)
+$ ? (@.a < -0.1)
+$ ? (@.a < +0.1)
+$ ? (@.a < 10.1)
+$ ? (@.a < -10.1)
+$ ? (@.a < +10.1)
+$ ? (@.a < 1e1)
+$ ? (@.a < -1e1)
+$ ? (@.a < +1e1)
+$ ? (@.a < .1e1)
+$ ? (@.a < -.1e1)
+$ ? (@.a < +.1e1)
+$ ? (@.a < 0.1e1)
+$ ? (@.a < -0.1e1)
+$ ? (@.a < +0.1e1)
+$ ? (@.a < 10.1e1)
+$ ? (@.a < -10.1e1)
+$ ? (@.a < +10.1e1)
+$ ? (@.a < 1e-1)
+$ ? (@.a < -1e-1)
+$ ? (@.a < +1e-1)
+$ ? (@.a < .1e-1)
+$ ? (@.a < -.1e-1)
+$ ? (@.a < +.1e-1)
+$ ? (@.a < 0.1e-1)
+$ ? (@.a < -0.1e-1)
+$ ? (@.a < +0.1e-1)
+$ ? (@.a < 10.1e-1)
+$ ? (@.a < -10.1e-1)
+$ ? (@.a < +10.1e-1)
+$ ? (@.a < 1e+1)
+$ ? (@.a < -1e+1)
+$ ? (@.a < +1e+1)
+$ ? (@.a < .1e+1)
+$ ? (@.a < -.1e+1)
+$ ? (@.a < +.1e+1)
+$ ? (@.a < 0.1e+1)
+$ ? (@.a < -0.1e+1)
+$ ? (@.a < +0.1e+1)
+$ ? (@.a < 10.1e+1)
+$ ? (@.a < -10.1e+1)
+$ ? (@.a < +10.1e+1)
+0
+0.0
+0.000
+0.000e1
+0.000e2
+0.000e3
+0.0010
+0.0010e-1
+0.0010e+1
+0.0010e+2
+.001
+.001e1
+1.
+1.e1
+1.2.e
+(1.2).e
+1e3
+1.e3
+1.e3.e
+1.e3.e4
+1.2e3
+1.2.e3
+(1.2).e3
+1..e
+1..e3
+(1.).e
+(1.).e3
+1?(2>3)
+\.
+
+-- Cast the text into jsonpath on the worker nodes.
+SELECT sample, sample::jsonpath FROM jsonpath_test ORDER BY id;
+
+-- Pull the data, and cast on the coordinator node
+WITH samples as (SELECT id, sample FROM jsonpath_test OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples ORDER BY id;
+
+-- now test some cases where trailing junk causes errors
+\COPY jsonpath_test(sample) FROM STDIN
+
+last
+1.type()
+$ ? (@ like_regex "(invalid pattern")
+$ ? (@ like_regex "pattern" flag "xsms")
+@ + 1
+00
+1.e
+1.2e3a
+\.
+
+-- the following tests try to evaluate type casting on worker, followed by coordinator
+SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '';
+WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '' OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples;
+
+SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = 'last';
+WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = 'last' OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples;
+
+SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '1.type()';
+WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '1.type()' OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples;
+
+SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '$ ? (@ like_regex "(invalid pattern")';
+WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '$ ? (@ like_regex "(invalid pattern")' OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples;
+
+SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '$ ? (@ like_regex "pattern" flag "xsms")';
+WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '$ ? (@ like_regex "pattern" flag "xsms")' OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples;
+
+SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '@ + 1';
+WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '@ + 1' OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples;
+
+SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '00';
+WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '00' OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples;
+
+SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '1.e';
+WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '1.e' OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples;
+
+SELECT sample, sample::jsonpath FROM jsonpath_test WHERE sample = '1.2e3a';
+WITH samples as (SELECT id, sample FROM jsonpath_test WHERE sample = '1.2e3a' OFFSET 0)
+SELECT sample, sample::jsonpath FROM samples;
+
+DROP SCHEMA jsonpath CASCADE;
@@ -159,5 +159,44 @@ BEGIN;
 CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT);
 ROLLBACK;
+
+CREATE TABLE set_on_default_test_referenced(
+  col_1 int, col_2 int, col_3 int, col_4 int,
+  unique (col_1, col_3)
+);
+SELECT create_reference_table('set_on_default_test_referenced');
+
+-- from citus local to reference - 1
+CREATE TABLE set_on_default_test_referencing(
+  col_1 int, col_2 int, col_3 serial, col_4 int,
+  FOREIGN KEY(col_1, col_3)
+    REFERENCES set_on_default_test_referenced(col_1, col_3)
+    ON UPDATE SET DEFAULT
+);
+
+CREATE TABLE set_on_default_test_referencing(
+  col_1 serial, col_2 int, col_3 int, col_4 int
+);
+
+-- from citus local to reference - 2
+ALTER TABLE set_on_default_test_referencing ADD CONSTRAINT fkey
+  FOREIGN KEY(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
+  ON DELETE SET DEFAULT;
+
+DROP TABLE set_on_default_test_referencing, set_on_default_test_referenced;
+
+CREATE TABLE set_on_default_test_referenced(
+  col_1 int, col_2 int, col_3 int, col_4 int,
+  unique (col_1, col_3)
+);
+SELECT citus_add_local_table_to_metadata('set_on_default_test_referenced');
+
+-- from citus local to citus local
+CREATE TABLE set_on_default_test_referencing(
+  col_1 int, col_2 int, col_3 serial, col_4 int,
+  FOREIGN KEY(col_1, col_3)
+    REFERENCES set_on_default_test_referenced(col_1, col_3)
+    ON DELETE SET DEFAULT
+);
+
 -- cleanup at exit
 DROP SCHEMA ref_citus_local_fkeys CASCADE;