mirror of https://github.com/citusdata/citus.git
Pg15 support (#6085)
* Adjust configure script to allow PG15
* Adds copy of ruleutils_14.c as ruleutils_15.c
* Uses get_namespace_name_or_temp in ruleutils_15.c
Relevant PG commit:
48c5c9068211e0a04fd9553c8714b2821ed3ad17
* Clean up code using "(expr) ? true : false" in ruleutils_15.c
Relevant PG commit:
fd0625c7a9c679c0c1e896014b8f49a489c3a245
* Change varno from Index (unsigned int) to int in ruleutils_15.c
Relevant PG commit:
e3ec3c00d85bd2844ffddee83df2bd67c4f8297f
* Adds find_recursive_union to ruleutils_15.c
Relevant PG commit:
3f50b82639637c9908afa2087de7588450aa866b
* Fix display of SQL-std func's args in INSERT/SELECT in ruleutils_15.c
Relevant PG commit:
a8d8445a7b2f80f6d0bfe97b19f90bd2cbef8759
* Fix ruleutils_15.c's dumping of whole-row Vars in more contexts
Relevant PG commit:
43c2175121c829c8591fc5117b725f1f22bfb670
* Fix assorted missing logic for GroupingFunc nodes in ruleutils_15.c
Relevant PG commit:
2591ee8ec44d8cbc8e1226550337a64c684746e4
* Adds grammar support for SQL/JSON clauses in ruleutils_15.c
Relevant PG commit:
f79b803dcc98d707450e158db3638dc67ff8380b
* Adds SQL/JSON constructors to ruleutils_15.c
Relevant PG commits:
f4fb45d15c59d7add2e1b81a9d477d0119a9691a
cc7401d5ca498a84d9b47fd2e01cebd8e830e558
* Adds support for MERGE in ruleutils_15.c
Relevant PG commit:
7103ebb7aae8ab8076b7e85f335ceb8fe799097c
* Add IS JSON predicate to ruleutils_15.c
Relevant PG commit:
33a377608fc29cdd1f6b63be561eab0aee5c81f0
* Add SQL/JSON query functions to ruleutils_15.c
Relevant PG commit:
1a36bc9dba8eae90963a586d37b6457b32b2fed4
* Adds three different SQL/JSON values to ruleutils_15.c
Relevant PG commits:
606948b058dc16bce494270eea577011a602810e
49082c2cc3d8167cca70cfe697afb064710828ca
* Adds JSON table functions in ruleutils_15.c
Relevant PG commit:
4e34747c88a03ede6e9d731727815e37273d4bc9
* Add PLAN function for JSON table in ruleutils_15.c
Relevant PG commit:
fadb48b00e02ccfd152baa80942de30205ab3c4f
* Remove extra blank lines before block-closing braces ruleutils_15.c
Relevant PG commit:
24d2b2680a8d0e01b30ce8a41c4eb3b47aca5031
* set_deparse_plan: Reuse variable to appease Coverity ruleutils_15.c
Relevant PG commit:
e70813fbc4aaca35ec012d5a426706bd54e4acab
* Mechanical code beautification ruleutils_15.c
Relevant PG commit:
23e7b38bfe396f919fdb66057174d29e17086418
* Rename value_type to item_type in ruleutils_15.c
Relevant PG commit:
3ab9a63cb638a1fd99475668e2da9c237495aeda
* Show 'AS "?column?"' explicitly when it's important in ruleutils_15.c
Relevant PG commit:
c7461fc25558832dd347a9c8150b0f1ed85e36e8
* Fix ruleutils_15.c issues with dropped cols in funcs-returning-composite
Relevant PG commit:
c1d1e8469c77ce6b8e5310955580b4a3eee7fe96
* Change comment regarding functions returning composite in ruleutils_15.c
Relevant PG commit:
c2fa113ddb1117b1f03e91960f65d5d7d8a90270
* Replace int nodes with bool nodes where needed
In PG15, Boolean nodes are added. Pre PG15, internal Boolean values
in Create Role commands were represented by Integer nodes. This
commit replaces int nodes logic with bool nodes logic where needed.
Mostly there are CREATE ROLE logic changes.
Relevant PG commit:
941460fcf731a32e6a90691508d5cfa3d1f8eeaf
* Handle new option colliculocale in CREATE COLLATION logic
In PG15, there is an added option to use ICU as global locale provider.
pg_collation has three locale-related fields: collcollate and collctype,
which are libc-related fields, and a new one colliculocale, which is the
ICU-related field. Only the libc-related fields or the ICU-related field
is set, never both.
Relevant PG commits:
f2553d43060edb210b36c63187d52a632448e1d2
54637508f87bd5f07fb9406bac6b08240283be3b
* Add PG15 tests to CI using test images that have 15beta2 (#6093)
* Change warning message in pg_signal_backend()
Relevant PG commit:
7fa945b857cc1b2964799411f1633468826861ff
* Revert "Add missing ifdef for PG 15"
This reverts commit c7b51025ab.
* Fixes tests for ALTER TRIGGER RENAME consistency for part. tables
Relevant PG commit:
80ba4bb383538a2ee846fece6a7b8da9518b6866
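For example (trigger and table names here are illustrative), renaming a
trigger on the parent table in PG15 also renames the cloned triggers on
its partitions:

    ALTER TRIGGER dist_trigger ON partitioned_dist_table RENAME TO dist_trigger_renamed;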
* Prevent creating child triggers on partitions when adding new node
Pre PG15, tgisinternal is true for a "child" trigger on a partition
cloned from the trigger on the parent.
In PG15, tgisinternal is false in that case. However, we don't want to
create this trigger on the partition since it will create a conflict
when we try to attach the partition to the parent table:
ERROR: trigger "..." for relation "{partition_name}" already exists
Relevant PG commit:
f4566345cf40b068368cb5617e61318da60676ec
* Fix tests for generated columns dependency changes
In PG15, for GENERATED columns, all dependencies of the generation
expression are recorded as NORMAL dependencies of the column itself.
This requires CASCADE to drop generated cols with the original col.
Pre PG15, dependencies were recorded as AUTO, with which
generated columns are silently dropped with the original column.
Relevant PG commit:
cb02fcb4c95bae08adaca1202c2081cfc81a28b5
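A minimal illustration of the behavior difference (table and column names
are made up for the example):

    CREATE TABLE gen_test (a int, a_twice int GENERATED ALWAYS AS (a * 2) STORED);
    -- pre PG15: the AUTO dependency silently drops a_twice together with a
    -- PG15: errors out unless CASCADE is given
    ALTER TABLE gen_test DROP COLUMN a CASCADE;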
* Explicitly cast catalog "char" column to text before concatenation
Relevant PG commit:
07eee5a0dc642d26f44d65c4e6263304208e8583
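A hypothetical query showing the pattern; the explicit ::text cast keeps
the concatenation of a "char" catalog column uniform across PG versions:

    SELECT relname || ' (' || relkind::text || ')' FROM pg_class LIMIT 1;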
* Remove 'AS "?column?"' from test outputs
There were some instances in the following test outputs,
in planning debug output, where AS "?column?" is added.
We add a normalization rule to remove it as it is not
important.
cte_inline.out
recursive_relation_planning_restriction_pushdown.out
Relevant PG commit:
c7461fc25558832dd347a9c8150b0f1ed85e36e8
* Use pg_backup_stop(PG15) instead of pg_stop_backup(PG<15)
Add an alternative test output because of the change in the
backup modes of Postgres. Specifically here, there is a renaming
issue: pg_stop_backup PRE PG15 vs pg_backup_stop PG15+
The alternative output can be deleted when we drop support for PG14
Relevant PG commit:
39969e2a1e4d7f5a37f3ef37d53bbfe171e7d77a
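The rename at the SQL level (the backup label is illustrative):

    -- PG15+
    SELECT pg_backup_start('citus-test-backup');
    SELECT * FROM pg_backup_stop();
    -- PG14 and earlier
    SELECT pg_start_backup('citus-test-backup');
    SELECT * FROM pg_stop_backup();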
* Adds citus.mitmfifo GUC
Previously we were setting this configuration parameter
on the fly for the failure tests schedule.
However, PG15 doesn't allow that anymore: reserved prefixes
like "citus" cannot be used to set non-existing GUCs.
Relevant PG commit:
88103567cb8fa5be46dc9fac3e3b8774951a2be7
* Handles EXPLAIN output diffs in PG15 - Extra result lines
To handle extra "Result" lines in explain outputs, we add an explain
helper to the multi_test_helpers.sql file:
- plan_without_result_lines() is added for cases where we want the
whole explain output with only "Result" lines removed
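A sketch of what such a helper can look like (the exact definition lives
in multi_test_helpers.sql):

    CREATE OR REPLACE FUNCTION plan_without_result_lines(explain_command text, OUT query_plan text)
    RETURNS SETOF text AS $$
    BEGIN
        FOR query_plan IN EXECUTE explain_command LOOP
            IF query_plan LIKE '%Result%' THEN
                CONTINUE;
            END IF;
            RETURN NEXT;
        END LOOP;
    END; $$ LANGUAGE plpgsql;

It is then called as SELECT plan_without_result_lines($Q$ EXPLAIN ... $Q$)
around the EXPLAIN statements that differ between PG14 and PG15.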
* Handles EXPLAIN output diffs in PG15, Hash Agg/Join leverage
To handle differences in usage of GroupAggregate vs HashAggregate
or Merge Join vs Hash Join in cases where this detail doesn't
seem to matter, we use coordinator_plan().
- coordinator_plan() is updated to remove "Result" lines
There are some cases where we have subplans, so we also add a new
function that prints all Task Count lines as well:
- coordinator_plan_with_subplans()
Still not sure of the relevant PG commit
Could be db0d67db2401eb6238ccc04c6407a4fd4f985832
but disabling enable_group_by_reordering didn't help.
* Handles EXPLAIN output diffs in PG15: enable_group_by_reordering
Relevant PG commit
db0d67db2401eb6238ccc04c6407a4fd4f985832
* Normalizes Memory Usage, Buckets, Batches for PG15 explain diffs
We create a new function in multi_test_helpers, which is similar
to the explain_merge function in PG15. This explain helper function
normalizes Memory Usage, Buckets and Batches, and we use it in the
tests which give a different output for PG15.
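A sketch of the shape of this normalization helper (the function name and
the exact patterns below are illustrative):

    CREATE OR REPLACE FUNCTION explain_analyze_normalized(explain_command text, OUT query_plan text)
    RETURNS SETOF text AS $$
    BEGIN
        FOR query_plan IN EXECUTE explain_command LOOP
            query_plan := regexp_replace(query_plan, 'Memory Usage: \d+', 'Memory Usage: xxx', 'g');
            query_plan := regexp_replace(query_plan, 'Buckets: \d+', 'Buckets: xxx', 'g');
            query_plan := regexp_replace(query_plan, 'Batches: \d+', 'Batches: xxx', 'g');
            RETURN NEXT;
        END LOOP;
    END; $$ LANGUAGE plpgsql;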
* Bump test images to 15beta3 (#6172)
* Omit namespace in post-copy errmsg
Relevant PG commit:
069d33d0c5a021601245e44df77a0423ddd69359
* Handles EXPLAIN output diffs in PG15: extra arrows&result lines
To handle extra "->" arrows resulting from extra Result lines
in explain outputs, we add the following explain helper to the
multi_test_helpers.sql file:
- plan_without_arrows() is added for cases where we want the
whole explain output without arrows and without Result lines
* Alters public schema's owner to pg_database_owner in PG15
In PG15, the public schema is owned by the pg_database_owner role.
In multi_extension, we drop and recreate the public schema,
hence its owner becomes the default user in our tests, postgres.
Change that to pg_database_owner for PG15 consistency.
This results in alternative test output for public schema grants
in the following test:
grant_on_schema_propagation.sql
Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
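The adjustment essentially amounts to running, on PG15 only:

    ALTER SCHEMA public OWNER TO pg_database_owner;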
* Add alternative test outputs for change in Insert Select display
citus_local_tables_queries.sql
coordinator_shouldhaveshards.sql
cte_inline.sql
insert_select_repartition.sql
intermediate_result_pruning.sql
local_shard_execution.sql
local_shard_execution_replicated.sql
multi_deparse_shard_query.sql
multi_insert_select.sql
multi_insert_select_conflict.sql
multi_mx_insert_select_repartition.sql
mx_coordinator_shouldhaveshards.sql
single_node.sql
Relevant PG commit:
a8d8445a7b2f80f6d0bfe97b19f90bd2cbef8759
* Fixes columnar tap tests for PG15
In PG15, Perl test modules have been moved to a new namespace.
Also, the postgres node new() and get_new_node() methods have been
unified into one method: new().
We create separate tap tests for PG13/14 and PG15+
and update the Makefiles accordingly.
Relevant PG commits:
201a76183e2056c2217129e12d68c25ec9c559c8
b3b4d8e68ae83f432f43f035c7eb481ef93e1583
* Handles EXPLAIN output diffs in PG15: HashAgg Leverage,alt. output
Still not sure of the relevant PG commit
Could be db0d67db2401eb6238ccc04c6407a4fd4f985832
but disabling enable_group_by_reordering didn't help.
pull/6241/head (parent ddbd10d2e7, commit 35b4ddc355)
@@ -13,6 +9,9 @@ parameters:
pg14_version:
type: string
default: '14.5'
pg15_version:
type: string
default: '15beta3'
upgrade_pg_versions:
type: string
default: '13.8-14.5-15beta3'
@@ -538,6 +541,10 @@ workflows:
name: build-14
pg_major: 14
image_tag: '<< pipeline.parameters.pg14_version >>'
- build:
name: build-15
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'

- check-style
- check-sql-snapshots
@@ -777,6 +784,123 @@ workflows:
make: check-failure
requires: [build-14]

- test-citus:
name: 'test-15_check-split'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-split
requires: [build-15]
- test-citus:
name: 'test-15_check-enterprise'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-enterprise
requires: [build-15]
- test-citus:
name: 'test-15_check-enterprise-isolation'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-enterprise-isolation
requires: [build-15]
- test-citus:
name: 'test-15_check-enterprise-isolation-logicalrep-1'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-enterprise-isolation-logicalrep-1
requires: [build-15]
- test-citus:
name: 'test-15_check-enterprise-isolation-logicalrep-2'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-enterprise-isolation-logicalrep-2
requires: [build-15]
- test-citus:
name: 'test-15_check-enterprise-isolation-logicalrep-3'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-enterprise-isolation-logicalrep-3
requires: [build-15]
- test-citus:
name: 'test-15_check-enterprise-failure'
pg_major: 15
image: citus/failtester
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-enterprise-failure
requires: [build-15]
- test-citus:
name: 'test-15_check-multi'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-multi
requires: [build-15]
- test-citus:
name: 'test-15_check-multi-1'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-multi-1
requires: [build-15]
- test-citus:
name: 'test-15_check-mx'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-multi-mx
requires: [build-15]
- test-citus:
name: 'test-15_check-vanilla'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-vanilla
requires: [build-15]
- test-citus:
name: 'test-15_check-isolation'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-isolation
requires: [build-15]
- test-citus:
name: 'test-15_check-operations'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-operations
requires: [build-15]
- test-citus:
name: 'test-15_check-follower-cluster'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-follower-cluster
requires: [build-15]
- test-citus:
name: 'test-15_check-columnar'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-columnar
requires: [build-15]
- test-citus:
name: 'test-15_check-columnar-isolation'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-columnar-isolation
requires: [build-15]
- tap-test-citus:
name: 'test-15_tap-recovery'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
suite: recovery
requires: [build-15]
- tap-test-citus:
name: 'test-15_tap-columnar-freezing'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
suite: columnar_freezing
requires: [build-15]
- test-citus:
name: 'test-15_check-failure'
pg_major: 15
image: citus/failtester
image_tag: '<< pipeline.parameters.pg15_version >>'
make: check-failure
requires: [build-15]

- test-arbitrary-configs:
name: 'test-13_check-arbitrary-configs'
pg_major: 13
@@ -787,6 +911,11 @@ workflows:
pg_major: 14
image_tag: '<< pipeline.parameters.pg14_version >>'
requires: [build-14]
- test-arbitrary-configs:
name: 'test-15_check-arbitrary-configs'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]

- test-pg-upgrade:
name: 'test-13-14_check-pg-upgrade'
@@ -795,6 +924,13 @@ workflows:
image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
requires: [build-13, build-14]

- test-pg-upgrade:
name: 'test-14-15_check-pg-upgrade'
old_pg_major: 14
new_pg_major: 15
image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
requires: [build-14, build-15]

- test-citus-upgrade:
name: test-13_check-citus-upgrade
pg_major: 13

@@ -27,6 +27,7 @@ configure -whitespace
src/backend/distributed/utils/citus_outfuncs.c -citus-style
src/backend/distributed/deparser/ruleutils_13.c -citus-style
src/backend/distributed/deparser/ruleutils_14.c -citus-style
src/backend/distributed/deparser/ruleutils_15.c -citus-style
src/backend/distributed/commands/index_pg_source.c -citus-style

src/include/distributed/citus_nodes.h -citus-style

@@ -2588,7 +2588,7 @@ fi
if test "$with_pg_version_check" = no; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5
$as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;}
elif test "$version_num" != '13' -a "$version_num" != '14'; then
elif test "$version_num" != '13' -a "$version_num" != '14' -a "$version_num" != '15'; then
as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5

@@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check)

if test "$with_pg_version_check" = no; then
AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)])
elif test "$version_num" != '13' -a "$version_num" != '14'; then
elif test "$version_num" != '13' -a "$version_num" != '14' -a "$version_num" != '15'; then
AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
else
AC_MSG_NOTICE([building against PostgreSQL $version_num])

@@ -63,15 +63,53 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
const char *collname = NameStr(collationForm->collname);
bool collisdeterministic = collationForm->collisdeterministic;

char *collcollate;
char *collctype;

#if PG_VERSION_NUM >= PG_VERSION_15

/*
* In PG15, there is an added option to use ICU as global locale provider.
* pg_collation has three locale-related fields: collcollate and collctype,
* which are libc-related fields, and a new one colliculocale, which is the
* ICU-related field. Only the libc-related fields or the ICU-related field
* is set, never both.
*/
char *colliculocale;
bool isnull;

Datum datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collcollate,
&isnull);
Assert(!isnull);
char *collcollate = TextDatumGetCString(datum);
if (!isnull)
{
collcollate = TextDatumGetCString(datum);
}
else
{
collcollate = NULL;
}

datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collctype, &isnull);
Assert(!isnull);
char *collctype = TextDatumGetCString(datum);
if (!isnull)
{
collctype = TextDatumGetCString(datum);
}
else
{
collctype = NULL;
}

datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_colliculocale, &isnull);
if (!isnull)
{
colliculocale = TextDatumGetCString(datum);
}
else
{
colliculocale = NULL;
}

AssertArg((collcollate && collctype) || colliculocale);
#else

/*
@@ -79,8 +117,8 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
* pstrdup() to match the interface of 15 so that we consistently free the
* result later.
*/
char *collcollate = pstrdup(NameStr(collationForm->collcollate));
char *collctype = pstrdup(NameStr(collationForm->collctype));
collcollate = pstrdup(NameStr(collationForm->collcollate));
collctype = pstrdup(NameStr(collationForm->collctype));
#endif

if (collowner != NULL)
@@ -106,6 +144,33 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
"CREATE COLLATION %s (provider = '%s'",
*quotedCollationName, providerString);

#if PG_VERSION_NUM >= PG_VERSION_15
if (colliculocale)
{
appendStringInfo(&collationNameDef,
", locale = %s",
quote_literal_cstr(colliculocale));
pfree(colliculocale);
}
else
{
if (strcmp(collcollate, collctype) == 0)
{
appendStringInfo(&collationNameDef,
", locale = %s",
quote_literal_cstr(collcollate));
}
else
{
appendStringInfo(&collationNameDef,
", lc_collate = %s, lc_ctype = %s",
quote_literal_cstr(collcollate),
quote_literal_cstr(collctype));
}
pfree(collcollate);
pfree(collctype);
}
#else
if (strcmp(collcollate, collctype) == 0)
{
appendStringInfo(&collationNameDef,
@@ -122,6 +187,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati

pfree(collcollate);
pfree(collctype);
#endif

if (!collisdeterministic)
{

@@ -10,6 +10,8 @@

#include "postgres.h"

#include "pg_version_compat.h"

#include "distributed/pg_version_constants.h"

#include "access/heapam.h"
@@ -59,6 +61,7 @@ static char * CreateCreateOrAlterRoleCommand(const char *roleName,
CreateRoleStmt *createRoleStmt,
AlterRoleStmt *alterRoleStmt);
static DefElem * makeDefElemInt(char *name, int value);
static DefElem * makeDefElemBool(char *name, bool value);
static List * GenerateRoleOptionsList(HeapTuple tuple);
static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options);
static List * GenerateGrantRoleStmtsOfRole(Oid roleid);
@@ -454,13 +457,13 @@ GenerateRoleOptionsList(HeapTuple tuple)
Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(tuple));

List *options = NIL;
options = lappend(options, makeDefElemInt("superuser", role->rolsuper));
options = lappend(options, makeDefElemInt("createdb", role->rolcreatedb));
options = lappend(options, makeDefElemInt("createrole", role->rolcreaterole));
options = lappend(options, makeDefElemInt("inherit", role->rolinherit));
options = lappend(options, makeDefElemInt("canlogin", role->rolcanlogin));
options = lappend(options, makeDefElemInt("isreplication", role->rolreplication));
options = lappend(options, makeDefElemInt("bypassrls", role->rolbypassrls));
options = lappend(options, makeDefElemBool("superuser", role->rolsuper));
options = lappend(options, makeDefElemBool("createdb", role->rolcreatedb));
options = lappend(options, makeDefElemBool("createrole", role->rolcreaterole));
options = lappend(options, makeDefElemBool("inherit", role->rolinherit));
options = lappend(options, makeDefElemBool("canlogin", role->rolcanlogin));
options = lappend(options, makeDefElemBool("isreplication", role->rolreplication));
options = lappend(options, makeDefElemBool("bypassrls", role->rolbypassrls));
options = lappend(options, makeDefElemInt("connectionlimit", role->rolconnlimit));

/* load password from heap tuple, use NULL if not set */
@@ -616,6 +619,16 @@ makeDefElemInt(char *name, int value)
}


/*
* makeDefElemBool creates a DefElem with boolean typed value with -1 as location.
*/
static DefElem *
makeDefElemBool(char *name, bool value)
{
return makeDefElem(name, (Node *) makeBoolean(value), -1);
}


/*
* GetDatabaseNameFromDbRoleSetting performs a lookup, and finds the database name
* associated DbRoleSetting Tuple

@@ -182,8 +182,17 @@ GetExplicitTriggerIdList(Oid relationId)
* Note that we mark truncate trigger that we create on citus tables as
* internal. Hence, below we discard citus_truncate_trigger as well as
* the implicit triggers created by postgres for foreign key validation.
*
* Pre PG15, tgisinternal is true for a "child" trigger on a partition
* cloned from the trigger on the parent.
* In PG15, tgisinternal is false in that case. However, we don't want to
* create this trigger on the partition since it will create a conflict
* when we try to attach the partition to the parent table:
* ERROR: trigger "..." for relation "{partition_name}" already exists
* Hence we add an extra check on whether the parent id is invalid to
* make sure this is not a child trigger
*/
if (!triggerForm->tgisinternal)
if (!triggerForm->tgisinternal && (triggerForm->tgparentid == InvalidOid))
{
triggerIdList = lappend_oid(triggerIdList, triggerForm->oid);
}

@@ -196,7 +196,7 @@ AppendDefElem(StringInfo buf, DefElem *def)
static void
AppendDefElemStrict(StringInfo buf, DefElem *def)
{
if (intVal(def->arg) == 1)
if (boolVal(def->arg))
{
appendStringInfo(buf, " STRICT");
}
@@ -223,7 +223,7 @@ AppendDefElemVolatility(StringInfo buf, DefElem *def)
static void
AppendDefElemLeakproof(StringInfo buf, DefElem *def)
{
if (intVal(def->arg) == 0)
if (!boolVal(def->arg))
{
appendStringInfo(buf, " NOT");
}
@@ -237,7 +237,7 @@ AppendDefElemLeakproof(StringInfo buf, DefElem *def)
static void
AppendDefElemSecurity(StringInfo buf, DefElem *def)
{
if (intVal(def->arg) == 0)
if (!boolVal(def->arg))
{
appendStringInfo(buf, " SECURITY INVOKER");
}

@@ -13,6 +13,8 @@

#include "postgres.h"

#include "pg_version_compat.h"

#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
#include "lib/stringinfo.h"
@@ -98,59 +100,59 @@ AppendRoleOption(StringInfo buf, ListCell *optionCell)
{
DefElem *option = (DefElem *) lfirst(optionCell);

if (strcmp(option->defname, "superuser") == 0 && intVal(option->arg))
if (strcmp(option->defname, "superuser") == 0 && boolVal(option->arg))
{
appendStringInfo(buf, " SUPERUSER");
}
else if (strcmp(option->defname, "superuser") == 0 && !intVal(option->arg))
else if (strcmp(option->defname, "superuser") == 0 && !boolVal(option->arg))
{
appendStringInfo(buf, " NOSUPERUSER");
}
else if (strcmp(option->defname, "createdb") == 0 && intVal(option->arg))
else if (strcmp(option->defname, "createdb") == 0 && boolVal(option->arg))
{
appendStringInfo(buf, " CREATEDB");
}
else if (strcmp(option->defname, "createdb") == 0 && !intVal(option->arg))
else if (strcmp(option->defname, "createdb") == 0 && !boolVal(option->arg))
{
appendStringInfo(buf, " NOCREATEDB");
}
else if (strcmp(option->defname, "createrole") == 0 && intVal(option->arg))
else if (strcmp(option->defname, "createrole") == 0 && boolVal(option->arg))
{
appendStringInfo(buf, " CREATEROLE");
}
else if (strcmp(option->defname, "createrole") == 0 && !intVal(option->arg))
else if (strcmp(option->defname, "createrole") == 0 && !boolVal(option->arg))
{
appendStringInfo(buf, " NOCREATEROLE");
}
else if (strcmp(option->defname, "inherit") == 0 && intVal(option->arg))
else if (strcmp(option->defname, "inherit") == 0 && boolVal(option->arg))
{
appendStringInfo(buf, " INHERIT");
}
else if (strcmp(option->defname, "inherit") == 0 && !intVal(option->arg))
else if (strcmp(option->defname, "inherit") == 0 && !boolVal(option->arg))
{
appendStringInfo(buf, " NOINHERIT");
}
else if (strcmp(option->defname, "canlogin") == 0 && intVal(option->arg))
else if (strcmp(option->defname, "canlogin") == 0 && boolVal(option->arg))
{
appendStringInfo(buf, " LOGIN");
}
else if (strcmp(option->defname, "canlogin") == 0 && !intVal(option->arg))
else if (strcmp(option->defname, "canlogin") == 0 && !boolVal(option->arg))
{
appendStringInfo(buf, " NOLOGIN");
}
else if (strcmp(option->defname, "isreplication") == 0 && intVal(option->arg))
else if (strcmp(option->defname, "isreplication") == 0 && boolVal(option->arg))
{
appendStringInfo(buf, " REPLICATION");
}
else if (strcmp(option->defname, "isreplication") == 0 && !intVal(option->arg))
else if (strcmp(option->defname, "isreplication") == 0 && !boolVal(option->arg))
{
appendStringInfo(buf, " NOREPLICATION");
}
else if (strcmp(option->defname, "bypassrls") == 0 && intVal(option->arg))
else if (strcmp(option->defname, "bypassrls") == 0 && boolVal(option->arg))
{
appendStringInfo(buf, " BYPASSRLS");
}
else if (strcmp(option->defname, "bypassrls") == 0 && !intVal(option->arg))
else if (strcmp(option->defname, "bypassrls") == 0 && !boolVal(option->arg))
{
appendStringInfo(buf, " NOBYPASSRLS");
}

@@ -18,8 +18,7 @@

#include "pg_config.h"

/* We should drop PG 15 support from this file, this is only for testing purposes until #6085 is merged. */
#if (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM <= PG_VERSION_15)
#if (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM < PG_VERSION_15)

#include "postgres.h"

File diff suppressed because it is too large
@@ -146,6 +146,7 @@ DEFINE_COLUMNAR_PASSTHROUGH_FUNC(test_columnar_storage_write_new_page)
#define DUMMY_REAL_TIME_EXECUTOR_ENUM_VALUE 9999999
static char *CitusVersion = CITUS_VERSION;
static char *DeprecatedEmptyString = "";
static char *MitmfifoEmptyString = "";

/* deprecated GUC value that should not be used anywhere outside this file */
static int ReplicationModel = REPLICATION_MODEL_STREAMING;
@@ -1767,6 +1768,24 @@ RegisterCitusConfigVariables(void)
GUC_UNIT_MS | GUC_NO_SHOW_ALL,
NULL, NULL, NULL);

/*
* Previously we setting this configuration parameter
* in the fly for failure tests schedule.
* However, PG15 doesn't allow that anymore: reserved prefixes
* like "citus" cannot be used to set non-existing GUCs.
* Relevant PG commit: 88103567cb8fa5be46dc9fac3e3b8774951a2be7
*/

DefineCustomStringVariable(
"citus.mitmfifo",
gettext_noop("Sets the citus mitm fifo path for failure tests"),
gettext_noop("This GUC is only used for testing."),
&MitmfifoEmptyString,
"",
PGC_SUSET,
GUC_NO_SHOW_ALL,
NULL, NULL, NULL);

DefineCustomEnumVariable(
"citus.multi_shard_modify_mode",
gettext_noop("Sets the connection type for multi shard modify queries"),

@@ -14,5 +14,6 @@
#define PG_VERSION_13 130000
#define PG_VERSION_14 140000
#define PG_VERSION_15 150000
#define PG_VERSION_16 160000

#endif /* PG_VERSION_CONSTANTS */

@@ -39,6 +39,8 @@ typedef Value String;
#define pgstat_init_relation(r) pgstat_initstats(r)
#define pg_analyze_and_rewrite_fixedparams(a, b, c, d, e) pg_analyze_and_rewrite(a, b, c, \
d, e)
#define boolVal(v) intVal(v)
#define makeBoolean(val) makeInteger(val)

static inline int64
pg_strtoint64(char *s)

@@ -10,6 +10,25 @@ subdir = src/test/columnar_freezing
top_builddir = ../../..
include $(top_builddir)/Makefile.global

# In PG15, Perl test modules have been moved to a new namespace
# new() and get_new_node() methods have been unified to 1 method: new()
# Relevant PG commits 201a76183e2056c2217129e12d68c25ec9c559c8
# b3b4d8e68ae83f432f43f035c7eb481ef93e1583
pg_version = $(shell $(PG_CONFIG) --version 2>/dev/null)
pg_whole_version = $(shell echo "$(pg_version)"| sed -e 's/^PostgreSQL \([0-9]*\)\(\.[0-9]*\)\{0,1\}\(.*\)/\1\2/')
pg_major_version = $(shell echo "$(pg_whole_version)"| sed -e 's/^\([0-9]\{2\}\)\(.*\)/\1/')

# for now, we only have a single test file
# due to the above explanation, we ended up separating the test paths for
# different versions. If you need to add new test files, be careful to add both versions
ifeq ($(pg_major_version),13)
test_path = t_pg13_pg14/*.pl
else ifeq ($(pg_major_version),14)
test_path = t_pg13_pg14/*.pl
else
test_path = t/*.pl
endif

# copied from pgxs/Makefile.global to use postgres' abs build dir for pg_regress
ifeq ($(enable_tap_tests),yes)

@@ -23,7 +42,7 @@ PGPORT='6$(DEF_PGPORT)' \
top_builddir='$(CURDIR)/$(top_builddir)' \
PG_REGRESS='$(pgxsdir)/src/test/regress/pg_regress' \
TEMP_CONFIG='$(CURDIR)'/postgresql.conf \
$(PROVE) $(PG_PROVE_FLAGS) $(PROVE_FLAGS) $(if $(PROVE_TESTS),$(PROVE_TESTS),t/*.pl)
$(PROVE) $(PG_PROVE_FLAGS) $(PROVE_FLAGS) $(if $(PROVE_TESTS),$(PROVE_TESTS),$(test_path))
endef

else
|
|
@@ -1,12 +1,12 @@
# Minimal test testing streaming replication
use strict;
use warnings;
use PostgresNode;
use TestLib;
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More tests => 2;

# Initialize single node
my $node_one = get_new_node('node_one');
my $node_one = PostgreSQL::Test::Cluster->new('node_one');
$node_one->init();
$node_one->start;
|
|
|
@ -0,0 +1,52 @@
|
|||
# Minimal test testing streaming replication
|
||||
use strict;
|
||||
use warnings;
|
||||
use PostgresNode;
|
||||
use TestLib;
|
||||
use Test::More tests => 2;
|
||||
|
||||
# Initialize single node
|
||||
my $node_one = get_new_node('node_one');
|
||||
$node_one->init();
|
||||
$node_one->start;
|
||||
|
||||
# initialize the citus extension
|
||||
$node_one->safe_psql('postgres', "CREATE EXTENSION citus;");
|
||||
|
||||
# create columnar table and insert simple data to verify the data survives a crash
|
||||
$node_one->safe_psql('postgres', "
|
||||
CREATE TABLE test_row(i int);
|
||||
INSERT INTO test_row VALUES (1);
|
||||
CREATE TABLE test_columnar_freeze(i int) USING columnar WITH(autovacuum_enabled=false);
|
||||
INSERT INTO test_columnar_freeze VALUES (1);
|
||||
");
|
||||
|
||||
my $ten_thousand_updates = "";
|
||||
|
||||
foreach (1..10000) {
|
||||
$ten_thousand_updates .= "UPDATE test_row SET i = i + 1;\n";
|
||||
}
|
||||
|
||||
# 70K updates
|
||||
foreach (1..7) {
|
||||
$node_one->safe_psql('postgres', $ten_thousand_updates);
|
||||
}
|
||||
|
||||
my $result = $node_one->safe_psql('postgres', "
|
||||
select age(relfrozenxid) < 70000 as was_frozen
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(f), 'columnar table was not frozen');
|
||||
|
||||
$node_one->safe_psql('postgres', 'VACUUM FREEZE test_columnar_freeze;');
|
||||
|
||||
$result = $node_one->safe_psql('postgres', "
|
||||
select age(relfrozenxid) < 70000 as was_frozen
|
||||
from pg_class where relname='test_columnar_freeze';
|
||||
");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(t), 'columnar table was frozen');
|
||||
|
||||
$node_one->stop('fast');
|
||||
|
|
@@ -12,6 +12,25 @@ subdir = src/test/recovery
top_builddir = ../../..
include $(top_builddir)/Makefile.global

# In PG15, Perl test modules have been moved to a new namespace
# new() and get_new_node() methods have been unified to 1 method: new()
# Relevant PG commits 201a76183e2056c2217129e12d68c25ec9c559c8
# b3b4d8e68ae83f432f43f035c7eb481ef93e1583
pg_version = $(shell $(PG_CONFIG) --version 2>/dev/null)
pg_whole_version = $(shell echo "$(pg_version)"| sed -e 's/^PostgreSQL \([0-9]*\)\(\.[0-9]*\)\{0,1\}\(.*\)/\1\2/')
pg_major_version = $(shell echo "$(pg_whole_version)"| sed -e 's/^\([0-9]\{2\}\)\(.*\)/\1/')

# for now, we only have a single test file
# due to the above explanation, we ended up separating the test paths for
# different versions. If you need to add new test files, be careful to add both versions
ifeq ($(pg_major_version),13)
test_path = t_pg13_pg14/*.pl
else ifeq ($(pg_major_version),14)
test_path = t_pg13_pg14/*.pl
else
test_path = t/*.pl
endif

# copied from pgxs/Makefile.global to use postgres' abs build dir for pg_regress
ifeq ($(enable_tap_tests),yes)

@@ -25,7 +44,7 @@ PGPORT='6$(DEF_PGPORT)' \
top_builddir='$(CURDIR)/$(top_builddir)' \
PG_REGRESS='$(pgxsdir)/src/test/regress/pg_regress' \
TEMP_CONFIG='$(CURDIR)'/postgresql.conf \
$(PROVE) $(PG_PROVE_FLAGS) $(PROVE_FLAGS) $(if $(PROVE_TESTS),$(PROVE_TESTS),t/*.pl)
$(PROVE) $(PG_PROVE_FLAGS) $(PROVE_FLAGS) $(if $(PROVE_TESTS),$(PROVE_TESTS),$(test_path))
endef

else
|
@@ -1,12 +1,12 @@
# Minimal test testing streaming replication
use strict;
use warnings;
use PostgresNode;
use TestLib;
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More tests => 6;

# Initialize single node
my $node_one = get_new_node('node_one');
my $node_one = PostgreSQL::Test::Cluster->new('node_one');
$node_one->init();
$node_one->start;
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
# Minimal test testing streaming replication
|
||||
use strict;
|
||||
use warnings;
|
||||
use PostgresNode;
|
||||
use TestLib;
|
||||
use Test::More tests => 6;
|
||||
|
||||
# Initialize single node
|
||||
my $node_one = get_new_node('node_one');
|
||||
$node_one->init();
|
||||
$node_one->start;
|
||||
|
||||
# initialize the citus extension
|
||||
$node_one->safe_psql('postgres', "CREATE EXTENSION citus;");
|
||||
|
||||
# create columnar table and insert simple data to verify the data survives a crash
|
||||
$node_one->safe_psql('postgres', "
|
||||
BEGIN;
|
||||
CREATE TABLE t1 (a int, b text) USING columnar;
|
||||
INSERT INTO t1 SELECT a, 'hello world ' || a FROM generate_series(1,1002) AS a;
|
||||
COMMIT;
|
||||
");
|
||||
|
||||
# simulate crash
|
||||
$node_one->stop('immediate');
|
||||
$node_one->start;
|
||||
|
||||
my $result = $node_one->safe_psql('postgres', "SELECT count(*) FROM t1;");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(1002), 'columnar recovered data from before crash');
|
||||
|
||||
|
||||
# truncate the table to verify the truncation survives a crash
|
||||
$node_one->safe_psql('postgres', "
|
||||
TRUNCATE t1;
|
||||
");
|
||||
|
||||
# simulate crash
|
||||
$node_one->stop('immediate');
|
||||
$node_one->start;
|
||||
|
||||
$result = $node_one->safe_psql('postgres', "SELECT count(*) FROM t1;");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(0), 'columnar recovered truncation');
|
||||
|
||||
# test crashing while having an open transaction
|
||||
$node_one->safe_psql('postgres', "
|
||||
BEGIN;
|
||||
INSERT INTO t1 SELECT a, 'hello world ' || a FROM generate_series(1,1003) AS a;
|
||||
");
|
||||
|
||||
# simulate crash
|
||||
$node_one->stop('immediate');
|
||||
$node_one->start;
|
||||
|
||||
$result = $node_one->safe_psql('postgres', "SELECT count(*) FROM t1;");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(0), 'columnar crash during uncommitted transaction');
|
||||
|
||||
# test crashing while having a prepared transaction
|
||||
$node_one->safe_psql('postgres', "
|
||||
BEGIN;
|
||||
INSERT INTO t1 SELECT a, 'hello world ' || a FROM generate_series(1,1004) AS a;
|
||||
PREPARE TRANSACTION 'prepared_xact_crash';
|
||||
");
|
||||
|
||||
# simulate crash
|
||||
$node_one->stop('immediate');
|
||||
$node_one->start;
|
||||
|
||||
$result = $node_one->safe_psql('postgres', "SELECT count(*) FROM t1;");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(0), 'columnar crash during prepared transaction (before commit)');
|
||||
|
||||
$node_one->safe_psql('postgres', "
|
||||
COMMIT PREPARED 'prepared_xact_crash';
|
||||
");
|
||||
|
||||
$result = $node_one->safe_psql('postgres', "SELECT count(*) FROM t1;");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(1004), 'columnar crash during prepared transaction (after commit)');
|
||||
|
||||
# test crash recovery with copied data
|
||||
$node_one->safe_psql('postgres', "
|
||||
\\copy t1 FROM stdin delimiter ','
|
||||
1,a
|
||||
2,b
|
||||
3,c
|
||||
\\.
|
||||
");
|
||||
|
||||
# simulate crash
|
||||
$node_one->stop('immediate');
|
||||
$node_one->start;
|
||||
|
||||
$result = $node_one->safe_psql('postgres', "SELECT count(*) FROM t1;");
|
||||
print "node one count: $result\n";
|
||||
is($result, qq(1007), 'columnar crash after copy command');
|
|
@@ -122,6 +122,11 @@ s/(ERROR: |WARNING: |error:) invalid socket/\1 connection not open/g
# Extra outputs after minor bump to PG14.5 and PG13.8
/^\s*invalid socket$/d

# pg15 changes
s/is not a PostgreSQL server process/is not a PostgreSQL backend process/g
s/ AS "\?column\?"//g
s/".*\.(.*)": (found .* removable)/"\1": \2/g

# intermediate_results
s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g
||||
|
|
|
@ -1,3 +1,17 @@
|
|||
--
|
||||
-- CITUS_LOCAL_TABLES_QUERIES
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
\set VERBOSITY terse
|
||||
SET citus.next_shard_id TO 1509000;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
@ -570,7 +584,7 @@ SELECT clear_and_init_test_tables();
|
|||
|
||||
INSERT INTO citus_local_table
|
||||
SELECT * from reference_table;
|
||||
NOTICE: executing the command locally: INSERT INTO citus_local_table_queries.citus_local_table_1509001 AS citus_table_alias (a, b) SELECT a, b FROM citus_local_table_queries.reference_table_1509003 reference_table
|
||||
NOTICE: executing the command locally: INSERT INTO citus_local_table_queries.citus_local_table_1509001 AS citus_table_alias (a, b) SELECT reference_table.a, reference_table.b FROM citus_local_table_queries.reference_table_1509003 reference_table
|
||||
INSERT INTO reference_table
|
||||
SELECT * from citus_local_table;
|
||||
NOTICE: executing the command locally: SELECT a, b FROM citus_local_table_queries.citus_local_table_1509001 citus_local_table
|
||||
|
@ -583,7 +597,7 @@ SELECT * from citus_local_table;
|
|||
NOTICE: executing the command locally: SELECT a, b FROM citus_local_table_queries.citus_local_table_1509001 citus_local_table
|
||||
INSERT INTO citus_local_table
|
||||
SELECT * from citus_local_table_2;
|
||||
NOTICE: executing the command locally: INSERT INTO citus_local_table_queries.citus_local_table_1509001 AS citus_table_alias (a, b) SELECT a, b FROM citus_local_table_queries.citus_local_table_2_1509002 citus_local_table_2
|
||||
NOTICE: executing the command locally: INSERT INTO citus_local_table_queries.citus_local_table_1509001 AS citus_table_alias (a, b) SELECT citus_local_table_2.a, citus_local_table_2.b FROM citus_local_table_queries.citus_local_table_2_1509002 citus_local_table_2
|
||||
INSERT INTO citus_local_table
|
||||
SELECT * from citus_local_table_2
|
||||
ORDER BY 1,2
|
||||
|
|
File diff suppressed because it is too large
|
@ -264,17 +264,21 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
|
|||
Columnar Projected Columns: a
|
||||
(9 rows)
|
||||
|
||||
SELECT plan_without_arrows($Q$
|
||||
EXPLAIN (costs off, timing off, summary off)
|
||||
SELECT y, * FROM another_columnar_table;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
plan_without_arrows
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (ColumnarScan) on another_columnar_table
|
||||
Columnar Projected Columns: x, y
|
||||
(2 rows)
|
||||
|
||||
SELECT plan_without_arrows($Q$
|
||||
EXPLAIN (costs off, timing off, summary off)
|
||||
SELECT *, x FROM another_columnar_table;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
plan_without_arrows
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (ColumnarScan) on another_columnar_table
|
||||
Columnar Projected Columns: x, y
|
||||
|
|
|
@ -957,13 +957,16 @@ SELECT * FROM weird_col_explain;
|
|||
Columnar Projected Columns: "bbbbbbbbbbbbbbbbbbbbbbbbb\!bbbb'bbbbbbbbbbbbbbbbbbbbb''bbbbbbbb", "aaaaaaaaaaaa$aaaaaa$$aaaaaaaaaaaaaaaaaaaaaaaaaaaaa'aaaaaaaa'$a'"
|
||||
(7 rows)
|
||||
|
||||
\set VERBOSITY terse
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS OFF, SUMMARY OFF)
|
||||
SELECT *, "bbbbbbbbbbbbbbbbbbbbbbbbb\!bbbb'bbbbbbbbbbbbbbbbbbbbb''bbbbbbbb"
|
||||
FROM weird_col_explain
|
||||
WHERE "bbbbbbbbbbbbbbbbbbbbbbbbb\!bbbb'bbbbbbbbbbbbbbbbbbbbb''bbbbbbbb" * 2 >
|
||||
"aaaaaaaaaaaa$aaaaaa$$aaaaaaaaaaaaaaaaaaaaaaaaaaaaa'aaaaaaaa'$a'!";
|
||||
$Q$);
|
||||
NOTICE: identifier "aaaaaaaaaaaa$aaaaaa$$aaaaaaaaaaaaaaaaaaaaaaaaaaaaa'aaaaaaaa'$a'!" will be truncated to "aaaaaaaaaaaa$aaaaaa$$aaaaaaaaaaaaaaaaaaaaaaaaaaaaa'aaaaaaaa'$a'"
|
||||
QUERY PLAN
|
||||
plan_without_result_lines
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus Adaptive)
|
||||
Task Count: 4
|
||||
|
@ -975,6 +978,7 @@ NOTICE: identifier "aaaaaaaaaaaa$aaaaaa$$aaaaaaaaaaaaaaaaaaaaaaaaaaaaa'aaaaaaaa
|
|||
Columnar Projected Columns: "bbbbbbbbbbbbbbbbbbbbbbbbb\!bbbb'bbbbbbbbbbbbbbbbbbbbb''bbbbbbbb", "aaaaaaaaaaaa$aaaaaa$$aaaaaaaaaaaaaaaaaaaaaaaaaaaaa'aaaaaaaa'$a'"
|
||||
(8 rows)
|
||||
|
||||
\set VERBOSITY default
|
||||
-- should not project any columns
|
||||
EXPLAIN (COSTS OFF, SUMMARY OFF)
|
||||
SELECT COUNT(*) FROM weird_col_explain;
|
||||
|
|
|
@ -1,4 +1,19 @@
|
|||
--
|
||||
-- COORDINATOR_SHOULDHAVESHARDS
|
||||
--
|
||||
-- Test queries on a distributed table with shards on the coordinator
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA coordinator_shouldhaveshards;
|
||||
SET search_path TO coordinator_shouldhaveshards;
|
||||
SET citus.next_shard_id TO 1503000;
|
||||
|
@ -113,8 +128,8 @@ NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1
|
|||
INSERT INTO repart_test (x, y) SELECT y, x FROM test;
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_1503000_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_1503000_to','SELECT y AS x, x AS y FROM coordinator_shouldhaveshards.test_1503000 test WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_1503003_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_1503003_to','SELECT y AS x, x AS y FROM coordinator_shouldhaveshards.test_1503003 test WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.repart_test_1503004 AS citus_table_alias (x, y) SELECT x, y FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1503000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.repart_test_1503007 AS citus_table_alias (x, y) SELECT x, y FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1503003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.repart_test_1503004 AS citus_table_alias (x, y) SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1503000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.repart_test_1503007 AS citus_table_alias (x, y) SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1503003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
|
||||
SELECT y FROM repart_test WHERE x = 1000;
|
||||
y
|
||||
---------------------------------------------------------------------
|
||||
|
@ -124,8 +139,8 @@ SELECT y FROM repart_test WHERE x = 1000;
|
|||
INSERT INTO repart_test (x, y) SELECT y, x FROM test ON CONFLICT (x) DO UPDATE SET y = -1;
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_1503000_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_1503000_to','SELECT y AS x, x AS y FROM coordinator_shouldhaveshards.test_1503000 test WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_1503003_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_1503003_to','SELECT y AS x, x AS y FROM coordinator_shouldhaveshards.test_1503003 test WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.repart_test_1503004 AS citus_table_alias (x, y) SELECT x, y FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1503000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ON CONFLICT(x) DO UPDATE SET y = '-1'::integer
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.repart_test_1503007 AS citus_table_alias (x, y) SELECT x, y FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1503003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ON CONFLICT(x) DO UPDATE SET y = '-1'::integer
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.repart_test_1503004 AS citus_table_alias (x, y) SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1503000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ON CONFLICT(x) DO UPDATE SET y = '-1'::integer
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.repart_test_1503007 AS citus_table_alias (x, y) SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1503003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ON CONFLICT(x) DO UPDATE SET y = '-1'::integer
|
||||
SELECT y FROM repart_test WHERE x = 1000;
|
||||
y
|
||||
---------------------------------------------------------------------
|
||||
|
@ -446,7 +461,7 @@ BEGIN;
|
|||
-- in postgres we wouldn't see this modifying cte, so it is consistent with postgres.
|
||||
WITH a AS (SELECT count(*) FROM test), b AS (INSERT INTO local VALUES (3,2) RETURNING *), c AS (INSERT INTO ref SELECT *,* FROM generate_series(1,10) RETURNING *), d AS (SELECT count(*) FROM ref JOIN local ON (a = x)) SELECT * FROM a, b, c, d ORDER BY x,y,a,b;
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_1503020'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_1503020'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503000 test WHERE true
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503003 test WHERE true
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN (SELECT local_1.x, NULL::integer AS y FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) local_1) local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
|
||||
|
@ -610,7 +625,7 @@ INSERT INTO ref_table SELECT * FROM ref_table LIMIT 10000 ON CONFLICT (x) DO UPD
|
|||
SELECT count(*) FROM cte_1;
|
||||
NOTICE: executing the command locally: SELECT x, y FROM coordinator_shouldhaveshards.ref_table_1503039 ref_table LIMIT 10000
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_table_1503039 AS citus_table_alias (x, y) SELECT x, y FROM read_intermediate_result('insert_select_XXX_1503039'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ON CONFLICT(x) DO UPDATE SET y = (excluded.y OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.x, citus_table_alias.y
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_table_1503039 AS citus_table_alias (x, y) SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('insert_select_XXX_1503039'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ON CONFLICT(x) DO UPDATE SET y = (excluded.y OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.x, citus_table_alias.y
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) cte_1
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
|
@ -923,7 +938,7 @@ inserts AS (
|
|||
RETURNING *
|
||||
) SELECT count(*) FROM inserts;
|
||||
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM coordinator_shouldhaveshards.table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO coordinator_shouldhaveshards.table_2 (key, value) SELECT key, count(*) AS count FROM coordinator_shouldhaveshards.table_1 WHERE (key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO coordinator_shouldhaveshards.table_2 (key, value) SELECT table_1.key, count(*) AS count FROM coordinator_shouldhaveshards.table_1 WHERE (table_1.key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY table_1.key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
DEBUG: push down of limit count: 1
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
File diff suppressed because it is too large
@@ -1,3 +1,17 @@
--
-- CTE_INLINE
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
t
(1 row)

CREATE SCHEMA cte_inline;
SET search_path TO cte_inline;
SET citus.next_shard_id TO 1960000;
@@ -423,6 +437,8 @@ DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823]
(1 row)
-- EXPLAIN should show the differences between MATERIALIZED and NOT MATERIALIZED
\set VERBOSITY terse
SELECT public.coordinator_plan_with_subplans($Q$
EXPLAIN (COSTS OFF) WITH cte_1 AS (SELECT * FROM test_table)
SELECT
count(*)
@@ -431,35 +447,44 @@ FROM
JOIN
cte_1 as second_entry
USING (key);
$Q$);
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value, other_value FROM cte_inline.test_table
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) first_entry JOIN (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) second_entry USING (key))
DEBUG: Creating router plan
QUERY PLAN
coordinator_plan_with_subplans
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-> Distributed Subplan XXX_1
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on test_table_1960000 test_table
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (intermediate_result.key = intermediate_result_1.key)
-> Sort
Sort Key: intermediate_result.key
-> Function Scan on read_intermediate_result intermediate_result
-> Sort
Sort Key: intermediate_result_1.key
-> Function Scan on read_intermediate_result intermediate_result_1
(21 rows)
(5 rows)

\set VERBOSITY default
-- enable_group_by_reordering is a new GUC introduced in PG15
-- it does some optimization of the order of group by keys which results
-- in a different explain output plan between PG13/14 and PG15
-- Hence we set that GUC to off.
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
SET enable_group_by_reordering TO off;
\endif
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM SET enable_group_by_reordering TO off;$$);
?column?
---------------------------------------------------------------------
1
(1 row)

SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,t)
(localhost,57638,t,t)
(2 rows)

EXPLAIN (COSTS OFF) WITH cte_1 AS NOT MATERIALIZED (SELECT * FROM test_table)
SELECT
@@ -499,6 +524,22 @@ DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823]
-> Seq Scan on test_table_1960000 test_table_1
(12 rows)

\if :server_version_ge_15
RESET enable_group_by_reordering;
\endif
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM RESET enable_group_by_reordering;$$);
?column?
---------------------------------------------------------------------
1
(1 row)

SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,t)
(localhost,57638,t,t)
(2 rows)

-- ctes with volatile functions are not
-- inlined
WITH cte_1 AS (SELECT *, random() FROM test_table)
@ -830,10 +871,10 @@ DEBUG: CTE fist_table_cte is going to be inlined via distributed planning
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 0 with name 'key'
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960000 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1960000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer, value text)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960001 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1960001_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer, value text)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960002 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1960002_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer, value text)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960003 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1960003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer, value text)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960000 AS citus_table_alias (key, value) SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1960000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer, value text)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960001 AS citus_table_alias (key, value) SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1960001_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer, value text)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960002 AS citus_table_alias (key, value) SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1960002_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer, value text)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960003 AS citus_table_alias (key, value) SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1960003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer, value text)
|
||||
-- the following INSERT..SELECT is even more interesting
|
||||
-- the CTE becomes pushdownable
|
||||
INSERT INTO test_table
|
||||
|
@ -844,10 +885,10 @@ WITH fist_table_cte AS
|
|||
FROM
|
||||
fist_table_cte;
|
||||
DEBUG: CTE fist_table_cte is going to be inlined via distributed planning
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960000 AS citus_table_alias (key, value) SELECT key, value FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table_1960000 test_table) fist_table_cte WHERE (key IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960001 AS citus_table_alias (key, value) SELECT key, value FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table_1960001 test_table) fist_table_cte WHERE (key IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960002 AS citus_table_alias (key, value) SELECT key, value FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table_1960002 test_table) fist_table_cte WHERE (key IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960003 AS citus_table_alias (key, value) SELECT key, value FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table_1960003 test_table) fist_table_cte WHERE (key IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960000 AS citus_table_alias (key, value) SELECT fist_table_cte.key, fist_table_cte.value FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table_1960000 test_table) fist_table_cte WHERE (fist_table_cte.key IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960001 AS citus_table_alias (key, value) SELECT fist_table_cte.key, fist_table_cte.value FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table_1960001 test_table) fist_table_cte WHERE (fist_table_cte.key IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960002 AS citus_table_alias (key, value) SELECT fist_table_cte.key, fist_table_cte.value FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table_1960002 test_table) fist_table_cte WHERE (fist_table_cte.key IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO cte_inline.test_table_1960003 AS citus_table_alias (key, value) SELECT fist_table_cte.key, fist_table_cte.value FROM (SELECT test_table.key, test_table.value, test_table.other_value FROM cte_inline.test_table_1960003 test_table) fist_table_cte WHERE (fist_table_cte.key IS NOT NULL)
|
||||
-- update/delete/modifying ctes
|
||||
-- we don't support any cte inlining in modifications
|
||||
-- queries and modifying CTEs
File diff suppressed because it is too large
@@ -733,33 +733,29 @@ SELECT operation_type, product_sku, state_code FROM record_sale ORDER BY 1,2,3;
--
--Test ALTER TRIGGER
--
-- Pre PG15, renaming the trigger on the parent table didn't rename the same trigger on
-- the children as well. Hence, let's not print the trigger names of the children
-- In PG15, rename is consistent for all partitions of the parent
-- This is tested in pg15.sql file.
CREATE VIEW sale_triggers AS
SELECT tgname, tgrelid::regclass, tgenabled
FROM pg_trigger
WHERE tgrelid::regclass::text like 'sale%'
WHERE tgrelid::regclass::text = 'sale'
ORDER BY 1, 2;
SELECT * FROM sale_triggers ORDER BY 1,2;
tgname | tgrelid | tgenabled
---------------------------------------------------------------------
record_sale_trigger | sale | O
record_sale_trigger | sale_newyork | O
record_sale_trigger | sale_california | O
truncate_trigger_xxxxxxx | sale | O
truncate_trigger_xxxxxxx | sale_california | O
truncate_trigger_xxxxxxx | sale_newyork | O
(6 rows)
(2 rows)

ALTER TRIGGER "record_sale_trigger" ON "distributed_triggers"."sale" RENAME TO "new_record_sale_trigger";
SELECT * FROM sale_triggers ORDER BY 1,2;
tgname | tgrelid | tgenabled
---------------------------------------------------------------------
new_record_sale_trigger | sale | O
record_sale_trigger | sale_newyork | O
record_sale_trigger | sale_california | O
truncate_trigger_xxxxxxx | sale | O
truncate_trigger_xxxxxxx | sale_california | O
truncate_trigger_xxxxxxx | sale_newyork | O
(6 rows)
(2 rows)

CREATE EXTENSION seg;
ALTER TRIGGER "emptest_audit" ON "emptest" DEPENDS ON EXTENSION seg;
@@ -64,7 +64,7 @@ ERROR: canceling statement due to user request
SET client_min_messages TO DEBUG;
-- 10000000000 is the node id multiplier for global pid
SELECT pg_cancel_backend(10000000000 * citus_coordinator_nodeid() + 0);
DEBUG: PID 0 is not a PostgreSQL server process
DEBUG: PID 0 is not a PostgreSQL backend process
DETAIL: from localhost:xxxxx
pg_cancel_backend
---------------------------------------------------------------------

@@ -72,7 +72,7 @@ DETAIL: from localhost:xxxxx
(1 row)

SELECT pg_terminate_backend(10000000000 * citus_coordinator_nodeid() + 0);
DEBUG: PID 0 is not a PostgreSQL server process
DEBUG: PID 0 is not a PostgreSQL backend process
DETAIL: from localhost:xxxxx
pg_terminate_backend
---------------------------------------------------------------------
@@ -1,6 +1,16 @@
--
-- GRANT_ON_SCHEMA_PROPAGATION
--
-- this test has different output for PG13/14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
t
(1 row)

-- test grants are propagated when the schema is
CREATE SCHEMA dist_schema;
CREATE TABLE dist_schema.dist_table (id int);
@ -332,16 +342,16 @@ GRANT USAGE ON SCHEMA PUBLIC TO PUBLIC;
|
|||
RESET ROLE;
|
||||
-- check if the grants are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
public | {pg_database_owner=UC/pg_database_owner,=UC/pg_database_owner,role_1=U*C*/pg_database_owner,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
public | {pg_database_owner=UC/pg_database_owner,=UC/pg_database_owner,role_1=U*C*/pg_database_owner,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
|
@ -354,16 +364,16 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
|||
|
||||
-- check if the grants are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
public | {pg_database_owner=UC/pg_database_owner,=UC/pg_database_owner,role_1=U*C*/pg_database_owner,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
public | {pg_database_owner=UC/pg_database_owner,=UC/pg_database_owner,role_1=U*C*/pg_database_owner,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
|
@ -371,16 +381,16 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspna
|
|||
REVOKE CREATE, USAGE ON SCHEMA PUBLIC FROM role_1 CASCADE;
|
||||
-- check if the grants are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres}
|
||||
public | {pg_database_owner=UC/pg_database_owner,=UC/pg_database_owner}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres}
|
||||
public | {pg_database_owner=UC/pg_database_owner,=UC/pg_database_owner}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
@@ -0,0 +1,398 @@
--
-- GRANT_ON_SCHEMA_PROPAGATION
--
-- this test has different output for PG13/14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
f
(1 row)
|
||||
-- test grants are propagated when the schema is
|
||||
CREATE SCHEMA dist_schema;
|
||||
CREATE TABLE dist_schema.dist_table (id int);
|
||||
CREATE SCHEMA another_dist_schema;
|
||||
CREATE TABLE another_dist_schema.dist_table (id int);
|
||||
SET citus.enable_ddl_propagation TO off;
|
||||
CREATE SCHEMA non_dist_schema;
|
||||
SET citus.enable_ddl_propagation TO on;
|
||||
-- create roles on all nodes
|
||||
CREATE USER role_1;
|
||||
CREATE USER role_2;
|
||||
CREATE USER role_3;
|
||||
-- do some varying grants
|
||||
GRANT USAGE, CREATE ON SCHEMA dist_schema TO role_1 WITH GRANT OPTION;
|
||||
GRANT USAGE ON SCHEMA dist_schema TO role_2;
|
||||
SET ROLE role_1;
|
||||
GRANT USAGE ON SCHEMA dist_schema TO role_3 WITH GRANT OPTION;
|
||||
GRANT CREATE ON SCHEMA dist_schema TO role_3;
|
||||
GRANT CREATE, USAGE ON SCHEMA dist_schema TO PUBLIC;
|
||||
RESET ROLE;
|
||||
GRANT USAGE ON SCHEMA dist_schema TO PUBLIC;
|
||||
SELECT create_distributed_table('dist_schema.dist_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('another_dist_schema.dist_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- grant all permissions
|
||||
GRANT ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema TO role_1, role_2, role_3 WITH GRANT OPTION;
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres,role_3=U*C*/postgres}
|
||||
non_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres,role_3=U*C*/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke all permissions
|
||||
REVOKE ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema FROM role_1, role_2, role_3, PUBLIC CASCADE;
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres}
|
||||
non_dist_schema | {postgres=UC/postgres}
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- grant with multiple permissions, roles and schemas
|
||||
GRANT USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema TO role_1, role_2, role_3;
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
non_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke with multiple permissions, roles and schemas
|
||||
REVOKE USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema FROM role_1, role_2;
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
non_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- grant with grant option
|
||||
GRANT USAGE ON SCHEMA dist_schema TO role_1, role_3 WITH GRANT OPTION;
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=U*C/postgres,role_1=U*/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke grant option for
|
||||
REVOKE GRANT OPTION FOR USAGE ON SCHEMA dist_schema FROM role_3;
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- test current_user
|
||||
SET citus.enable_alter_role_propagation TO ON;
|
||||
ALTER ROLE role_1 SUPERUSER;
|
||||
SET citus.enable_alter_role_propagation TO OFF;
|
||||
SET ROLE role_1;
|
||||
-- this is only supported on citus enterprise where multiple users can be managed
|
||||
-- The output of the nspname select below will indicate if the create has been granted
|
||||
GRANT CREATE ON SCHEMA dist_schema TO CURRENT_USER;
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*C/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
RESET ROLE;
|
||||
SET citus.enable_alter_role_propagation TO ON;
|
||||
ALTER ROLE role_1 NOSUPERUSER;
|
||||
SET citus.enable_alter_role_propagation TO OFF;
|
||||
DROP TABLE dist_schema.dist_table, another_dist_schema.dist_table;
|
||||
DROP SCHEMA dist_schema;
|
||||
DROP SCHEMA another_dist_schema;
|
||||
DROP SCHEMA non_dist_schema;
|
||||
-- test if the grantors are propagated correctly
|
||||
-- first remove one of the worker nodes
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- create a new schema
|
||||
CREATE SCHEMA grantor_schema;
|
||||
-- give cascading permissions
|
||||
GRANT USAGE, CREATE ON SCHEMA grantor_schema TO role_1 WITH GRANT OPTION;
|
||||
GRANT CREATE ON SCHEMA grantor_schema TO PUBLIC;
|
||||
SET ROLE role_1;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_2 WITH GRANT OPTION;
|
||||
GRANT CREATE ON SCHEMA grantor_schema TO role_2;
|
||||
GRANT USAGE, CREATE ON SCHEMA grantor_schema TO role_3;
|
||||
GRANT CREATE, USAGE ON SCHEMA grantor_schema TO PUBLIC;
|
||||
SET ROLE role_2;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_3;
|
||||
RESET ROLE;
|
||||
-- distribute the schema
|
||||
CREATE TABLE grantor_schema.grantor_table (id INT);
|
||||
SELECT create_distributed_table('grantor_schema.grantor_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- check if the grantors are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- add the previously removed node
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- check if the grantors are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke one of the permissions
|
||||
REVOKE USAGE ON SCHEMA grantor_schema FROM role_1 CASCADE;
|
||||
-- check if revoke worked correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=C*/postgres,=C/postgres,role_2=C/role_1,role_3=C/role_1,=C/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=C*/postgres,=C/postgres,role_2=C/role_1,role_3=C/role_1,=C/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- test if grantor propagates correctly on already distributed schemas
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_1 WITH GRANT OPTION;
|
||||
SET ROLE role_1;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_2;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_3 WITH GRANT OPTION;
|
||||
SET ROLE role_3;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_2;
|
||||
RESET ROLE;
|
||||
-- check the results
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
DROP TABLE grantor_schema.grantor_table;
|
||||
DROP SCHEMA grantor_schema CASCADE;
|
||||
-- test distributing the schema with another user
|
||||
CREATE SCHEMA dist_schema;
|
||||
GRANT ALL ON SCHEMA dist_schema TO role_1 WITH GRANT OPTION;
|
||||
SET ROLE role_1;
|
||||
GRANT ALL ON SCHEMA dist_schema TO role_2 WITH GRANT OPTION;
|
||||
CREATE TABLE dist_schema.dist_table (id int);
|
||||
SELECT create_distributed_table('dist_schema.dist_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
DROP TABLE dist_schema.dist_table;
|
||||
DROP SCHEMA dist_schema CASCADE;
|
||||
-- test grants on public schema
|
||||
-- first remove one of the worker nodes
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- distribute the public schema (it has to be distributed by now but just in case)
|
||||
CREATE TABLE public_schema_table (id INT);
|
||||
SELECT create_distributed_table('public_schema_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- give cascading permissions
|
||||
GRANT USAGE, CREATE ON SCHEMA PUBLIC TO role_1 WITH GRANT OPTION;
|
||||
SET ROLE role_1;
|
||||
GRANT USAGE ON SCHEMA PUBLIC TO PUBLIC;
|
||||
RESET ROLE;
|
||||
-- check if the grants are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- add the previously removed node
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- check if the grants are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke those new permissions
|
||||
REVOKE CREATE, USAGE ON SCHEMA PUBLIC FROM role_1 CASCADE;
|
||||
-- check if the grants are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
DROP TABLE public_schema_table;
|
||||
DROP ROLE role_1, role_2, role_3;
@@ -1,3 +1,17 @@
--
-- INSERT_SELECT_REPARTITION
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
t
(1 row)

-- tests behaviour of INSERT INTO ... SELECT with repartitioning
CREATE SCHEMA insert_select_repartition;
SET search_path TO 'insert_select_repartition';
@ -29,10 +43,10 @@ HINT: Ensure the target table's partition column has a corresponding simple col
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 0 with name 'a'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213583_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213586 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213582_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2,repartitioned_results_xxxxx_from_4213582_to_2,repartitioned_results_xxxxx_from_4213584_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213588 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213583_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213586 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213582_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2,repartitioned_results_xxxxx_from_4213582_to_2,repartitioned_results_xxxxx_from_4213584_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213588 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)
|
||||
RESET client_min_messages;
|
||||
SELECT * FROM target_table WHERE a=-1 OR a=-3 OR a=-7 ORDER BY a;
|
||||
a
|
||||
|
@ -79,8 +93,8 @@ DETAIL: The target table's partition column should correspond to a partition co
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 2 with name 'key'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213591 AS citus_table_alias (f1, value, key) SELECT f1, value, key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_0,repartitioned_results_xxxxx_from_4213590_to_0}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, value integer, key insert_select_repartition.composite_key_type)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213592 AS citus_table_alias (f1, value, key) SELECT f1, value, key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_1,repartitioned_results_xxxxx_from_4213590_to_1}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, value integer, key insert_select_repartition.composite_key_type)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213591 AS citus_table_alias (f1, value, key) SELECT intermediate_result.f1, intermediate_result.value, intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_0,repartitioned_results_xxxxx_from_4213590_to_0}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, value integer, key insert_select_repartition.composite_key_type)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213592 AS citus_table_alias (f1, value, key) SELECT intermediate_result.f1, intermediate_result.value, intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_1,repartitioned_results_xxxxx_from_4213590_to_1}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, value integer, key insert_select_repartition.composite_key_type)
|
||||
RESET client_min_messages;
|
||||
SELECT * FROM target_table ORDER BY key;
|
||||
f1 | value | key
|
||||
|
@ -109,8 +123,8 @@ DETAIL: The target table's partition column should correspond to a partition co
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 2 with name 'key'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213591 AS citus_table_alias (f1, value, key) SELECT f1, value, key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_0,repartitioned_results_xxxxx_from_4213590_to_0}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, value integer, key insert_select_repartition.composite_key_type)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213592 AS citus_table_alias (f1, value, key) SELECT f1, value, key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_1,repartitioned_results_xxxxx_from_4213590_to_1}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, value integer, key insert_select_repartition.composite_key_type)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213591 AS citus_table_alias (f1, value, key) SELECT intermediate_result.f1, intermediate_result.value, intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_0,repartitioned_results_xxxxx_from_4213590_to_0}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, value integer, key insert_select_repartition.composite_key_type)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213592 AS citus_table_alias (f1, value, key) SELECT intermediate_result.f1, intermediate_result.value, intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_1,repartitioned_results_xxxxx_from_4213590_to_1}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, value integer, key insert_select_repartition.composite_key_type)
|
||||
RESET client_min_messages;
|
||||
SELECT * FROM target_table ORDER BY key;
|
||||
f1 | value | key
|
||||
|
@ -133,8 +147,8 @@ DETAIL: The target table's partition column should correspond to a partition co
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 1 with name 'key'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213591 AS citus_table_alias (f1, key) SELECT f1, key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_0,repartitioned_results_xxxxx_from_4213590_to_0}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, key insert_select_repartition.composite_key_type)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213592 AS citus_table_alias (f1, key) SELECT f1, key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_1,repartitioned_results_xxxxx_from_4213590_to_1}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, key insert_select_repartition.composite_key_type)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213591 AS citus_table_alias (f1, key) SELECT intermediate_result.f1, intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_0,repartitioned_results_xxxxx_from_4213590_to_0}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, key insert_select_repartition.composite_key_type)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213592 AS citus_table_alias (f1, key) SELECT intermediate_result.f1, intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_1,repartitioned_results_xxxxx_from_4213590_to_1}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, key insert_select_repartition.composite_key_type)
|
||||
RESET client_min_messages;
|
||||
SELECT * FROM target_table ORDER BY key;
|
||||
f1 | value | key
|
||||
|
@ -159,8 +173,8 @@ DETAIL: The target table's partition column should correspond to a partition co
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 1 with name 'key'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213591 AS citus_table_alias (f1, key) SELECT f1, key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_0}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, key insert_select_repartition.composite_key_type) ON CONFLICT(key) DO UPDATE SET f1 = 1
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213592 AS citus_table_alias (f1, key) SELECT f1, key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_1,repartitioned_results_xxxxx_from_4213590_to_1}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, key insert_select_repartition.composite_key_type) ON CONFLICT(key) DO UPDATE SET f1 = 1
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213591 AS citus_table_alias (f1, key) SELECT intermediate_result.f1, intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_0}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, key insert_select_repartition.composite_key_type) ON CONFLICT(key) DO UPDATE SET f1 = 1
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213592 AS citus_table_alias (f1, key) SELECT intermediate_result.f1, intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213589_to_1,repartitioned_results_xxxxx_from_4213590_to_1}'::text[], 'text'::citus_copy_format) intermediate_result(f1 integer, key insert_select_repartition.composite_key_type) ON CONFLICT(key) DO UPDATE SET f1 = 1
|
||||
RESET client_min_messages;
|
||||
SELECT * FROM target_table ORDER BY key;
|
||||
f1 | value | key
|
||||
|
@ -209,8 +223,8 @@ DETAIL: The data type of the target table's partition column should exactly mat
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 0 with name 'col_1'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213593 AS citus_table_alias (col_1, col_2) SELECT col_1, col_2 FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213597_to_0,repartitioned_results_xxxxx_from_4213600_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer) ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213594 AS citus_table_alias (col_1, col_2) SELECT col_1, col_2 FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213599_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer) ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213593 AS citus_table_alias (col_1, col_2) SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213597_to_0,repartitioned_results_xxxxx_from_4213600_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer) ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213594 AS citus_table_alias (col_1, col_2) SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213599_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer) ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2
|
||||
RESET client_min_messages;
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
|
@ -474,7 +488,7 @@ WITH c AS (
|
|||
SELECT mapped_key, c FROM source_table
|
||||
RETURNING *)
|
||||
SELECT * FROM c ORDER by a;
|
||||
DEBUG: generating subplan XXX_1 for CTE c: INSERT INTO insert_select_repartition.target_table (a, b) SELECT mapped_key, c FROM insert_select_repartition.source_table RETURNING target_table.a, target_table.b
|
||||
DEBUG: generating subplan XXX_1 for CTE c: INSERT INTO insert_select_repartition.target_table (a, b) SELECT source_table.mapped_key, source_table.c FROM insert_select_repartition.source_table RETURNING target_table.a, target_table.b
|
||||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT a, b FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer[])) c ORDER BY a
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
|
@ -583,9 +597,9 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 0 with name 'a'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213610 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_0,repartitioned_results_xxxxx_from_4213607_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213611 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213607_to_1,repartitioned_results_xxxxx_from_4213609_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213612 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_2,repartitioned_results_xxxxx_from_4213607_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213610 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_0,repartitioned_results_xxxxx_from_4213607_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213611 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213607_to_1,repartitioned_results_xxxxx_from_4213609_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213612 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_2,repartitioned_results_xxxxx_from_4213607_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
RESET client_min_messages;
|
||||
SELECT * FROM target_table ORDER BY a;
|
||||
a | b
|
||||
|
@ -702,23 +716,16 @@ PREPARE insert_plan AS
INSERT INTO target_table
SELECT a, max(b) FROM source_table
WHERE a BETWEEN 1 AND 2 GROUP BY a;
SELECT public.coordinator_plan($Q$
EXPLAIN EXECUTE insert_plan;
QUERY PLAN
$Q$);
coordinator_plan
---------------------------------------------------------------------
Custom Scan (Citus INSERT ... SELECT) (cost=0.00..0.00 rows=0 width=0)
INSERT/SELECT method: repartition
-> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=100000 width=8)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate (cost=44.09..44.28 rows=11 width=8)
Group Key: a
-> Sort (cost=44.09..44.12 rows=11 width=8)
Sort Key: a
-> Seq Scan on source_table_4213606 source_table (cost=0.00..43.90 rows=11 width=8)
Filter: ((a >= 1) AND (a <= 2))
(13 rows)
(4 rows)

SET client_min_messages TO DEBUG1;
EXECUTE insert_plan;
@ -755,7 +762,7 @@ WITH r AS (
|
|||
INSERT INTO target_table SELECT source_table.a, max(source_table.b) FROM source_table NATURAL JOIN r GROUP BY source_table.a;
|
||||
DEBUG: INSERT target table and the source relation of the SELECT partition column value must be colocated in distributed INSERT ... SELECT
|
||||
DEBUG: only SELECT, UPDATE, or DELETE common table expressions may be router planned
|
||||
DEBUG: generating subplan XXX_1 for CTE r: INSERT INTO insert_select_repartition.target_table (a, b) SELECT a, b FROM insert_select_repartition.source_table RETURNING target_table.a, target_table.b
|
||||
DEBUG: generating subplan XXX_1 for CTE r: INSERT INTO insert_select_repartition.target_table (a, b) SELECT source_table.a, source_table.b FROM insert_select_repartition.source_table RETURNING target_table.a, target_table.b
|
||||
DEBUG: INSERT target table and the source relation of the SELECT partition column value must be colocated in distributed INSERT ... SELECT
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT a, max AS b FROM (SELECT source_table.a, max(source_table.b) AS max FROM (insert_select_repartition.source_table JOIN (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) r USING (a, b)) GROUP BY source_table.a) citus_insert_select_subquery
|
||||
|
@ -763,13 +770,13 @@ DEBUG: Router planner cannot handle multi-shard select queries
|
|||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 0 with name 'a'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213610 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_0,repartitioned_results_xxxxx_from_4213607_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213611 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213607_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213612 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213609_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213610 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_0,repartitioned_results_xxxxx_from_4213607_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213611 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213607_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213612 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213609_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
DEBUG: partitioning SELECT query by column index 0 with name 'a'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213610 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_0,repartitioned_results_xxxxx_from_4213607_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213611 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213607_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213612 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213609_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213610 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_0,repartitioned_results_xxxxx_from_4213607_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213611 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213607_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213612 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213609_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
RESET client_min_messages;
|
||||
SELECT * FROM target_table ORDER BY a, b;
|
||||
a | b
|
||||
|
@ -899,7 +906,7 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 0 with name 'a'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213617 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213613_to_0,repartitioned_results_xxxxx_from_4213614_to_0,repartitioned_results_xxxxx_from_4213615_to_0,repartitioned_results_xxxxx_from_4213616_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213617 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213613_to_0,repartitioned_results_xxxxx_from_4213614_to_0,repartitioned_results_xxxxx_from_4213615_to_0,repartitioned_results_xxxxx_from_4213616_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
RESET client_min_messages;
|
||||
SELECT * FROM target_table ORDER BY a, b;
|
||||
a | b
|
||||
|
@ -1010,9 +1017,9 @@ DEBUG: INSERT target table and the source relation of the SELECT partition colu
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 0 with name 'a'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213633 AS citus_table_alias (a, b, c, d) SELECT a, b, c, d FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213629_to_0,repartitioned_results_xxxxx_from_4213630_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer, c integer, d integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213634 AS citus_table_alias (a, b, c, d) SELECT a, b, c, d FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213630_to_1,repartitioned_results_xxxxx_from_4213631_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer, c integer, d integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213635 AS citus_table_alias (a, b, c, d) SELECT a, b, c, d FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213632_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer, c integer, d integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213633 AS citus_table_alias (a, b, c, d) SELECT intermediate_result.a, intermediate_result.b, intermediate_result.c, intermediate_result.d FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213629_to_0,repartitioned_results_xxxxx_from_4213630_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer, c integer, d integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213634 AS citus_table_alias (a, b, c, d) SELECT intermediate_result.a, intermediate_result.b, intermediate_result.c, intermediate_result.d FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213630_to_1,repartitioned_results_xxxxx_from_4213631_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer, c integer, d integer)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213635 AS citus_table_alias (a, b, c, d) SELECT intermediate_result.a, intermediate_result.b, intermediate_result.c, intermediate_result.d FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213632_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer, c integer, d integer)
|
||||
RESET client_min_messages;
|
||||
SELECT count(*) FROM target_table;
|
||||
count
|
||||
|
@ -1214,8 +1221,8 @@ DEBUG: INSERT target table and the source relation of the SELECT partition colu
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
DEBUG: partitioning SELECT query by column index 0 with name 'c1'
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213639 AS enriched (c1, c2, c3, c4, c5, c6, cardinality, sum) SELECT c1, c2, c3, c4, c5, c6, cardinality, sum FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213644_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(c1 integer, c2 integer, c3 timestamp without time zone, c4 integer, c5 integer, c6 integer[], cardinality integer, sum integer) ON CONFLICT(c1, c2, c3, c4, c5, c6) DO UPDATE SET cardinality = (enriched.cardinality OPERATOR(pg_catalog.+) excluded.cardinality), sum = (enriched.sum OPERATOR(pg_catalog.+) excluded.sum)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213641 AS enriched (c1, c2, c3, c4, c5, c6, cardinality, sum) SELECT c1, c2, c3, c4, c5, c6, cardinality, sum FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213645_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(c1 integer, c2 integer, c3 timestamp without time zone, c4 integer, c5 integer, c6 integer[], cardinality integer, sum integer) ON CONFLICT(c1, c2, c3, c4, c5, c6) DO UPDATE SET cardinality = (enriched.cardinality OPERATOR(pg_catalog.+) excluded.cardinality), sum = (enriched.sum OPERATOR(pg_catalog.+) excluded.sum)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213639 AS enriched (c1, c2, c3, c4, c5, c6, cardinality, sum) SELECT intermediate_result.c1, intermediate_result.c2, intermediate_result.c3, intermediate_result.c4, intermediate_result.c5, intermediate_result.c6, intermediate_result.cardinality, intermediate_result.sum FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213644_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(c1 integer, c2 integer, c3 timestamp without time zone, c4 integer, c5 integer, c6 integer[], cardinality integer, sum integer) ON CONFLICT(c1, c2, c3, c4, c5, c6) DO UPDATE SET cardinality = (enriched.cardinality OPERATOR(pg_catalog.+) excluded.cardinality), sum = (enriched.sum OPERATOR(pg_catalog.+) excluded.sum)
|
||||
DEBUG: distributed statement: INSERT INTO insert_select_repartition.target_table_4213641 AS enriched (c1, c2, c3, c4, c5, c6, cardinality, sum) SELECT intermediate_result.c1, intermediate_result.c2, intermediate_result.c3, intermediate_result.c4, intermediate_result.c5, intermediate_result.c6, intermediate_result.cardinality, intermediate_result.sum FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213645_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(c1 integer, c2 integer, c3 timestamp without time zone, c4 integer, c5 integer, c6 integer[], cardinality integer, sum integer) ON CONFLICT(c1, c2, c3, c4, c5, c6) DO UPDATE SET cardinality = (enriched.cardinality OPERATOR(pg_catalog.+) excluded.cardinality), sum = (enriched.sum OPERATOR(pg_catalog.+) excluded.sum)
|
||||
RESET client_min_messages;
|
||||
EXPLAIN (COSTS OFF) INSERT INTO target_table AS enriched(c1, c2, c3, c4, c5, c6, cardinality, sum)
|
||||
SELECT c1, c2, c3, c4, -1::float AS c5,
|
||||
|
@ -1254,8 +1261,10 @@ NOTICE: copying the data has completed
(1 row)

SELECT public.plan_without_result_lines($Q$
explain (costs off) insert into table_with_sequences select y, x from table_with_sequences;
QUERY PLAN
$Q$);
plan_without_result_lines
---------------------------------------------------------------------
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
@ -1280,8 +1289,10 @@ NOTICE: copying the data has completed
(1 row)

SELECT public.plan_without_result_lines($Q$
explain (costs off) insert into table_with_user_sequences select y, x from table_with_user_sequences;
QUERY PLAN
$Q$);
plan_without_result_lines
---------------------------------------------------------------------
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
File diff suppressed because it is too large
@ -1,3 +1,17 @@
--
-- INTERMEDIATE_RESULT_PRUNING
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
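Throughout this diff, the PG14 and PG15 expected outputs differ only in how the worker-bound INSERT ... SELECT statements are displayed: on PG15 the SELECT list is printed with the range-table alias, where PG14 printed bare column names. A minimal sketch of the pattern, reusing the shard and intermediate-result names from the DEBUG lines earlier in this diff (line breaks added here for readability; PG14 form first, PG15 form second):

-- deparsed on PG14: bare column names in the SELECT list
INSERT INTO insert_select_repartition.target_table_4213610 AS citus_table_alias (a, b)
SELECT a, b
FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_0,repartitioned_results_xxxxx_from_4213607_to_0}'::text[],
    'binary'::citus_copy_format) intermediate_result(a integer, b integer);
-- deparsed on PG15: the same statement, with the SELECT list qualified by the alias
INSERT INTO insert_select_repartition.target_table_4213610 AS citus_table_alias (a, b)
SELECT intermediate_result.a, intermediate_result.b
FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213606_to_0,repartitioned_results_xxxxx_from_4213607_to_0}'::text[],
    'binary'::citus_copy_format) intermediate_result(a integer, b integer);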
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
t
(1 row)

CREATE SCHEMA intermediate_result_pruning;
SET search_path TO intermediate_result_pruning;
SET citus.log_intermediate_results TO TRUE;
@ -1039,7 +1053,7 @@ inserts AS MATERIALIZED (
|
|||
RETURNING *
|
||||
) SELECT count(*) FROM inserts;
|
||||
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM intermediate_result_pruning.table_3
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO intermediate_result_pruning.table_2 (key, value) SELECT key, count(*) AS count FROM intermediate_result_pruning.table_1 WHERE (key OPERATOR(pg_catalog.>) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO intermediate_result_pruning.table_2 (key, value) SELECT table_1.key, count(*) AS count FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.>) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY table_1.key HAVING (count(*) OPERATOR(pg_catalog.<) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
|
||||
File diff suppressed because it is too large
@ -1,8 +1,19 @@
--
-- ISSUE_5248
--
-- This test file has an alternative output because of the change in the
-- backup modes of Postgres. Specifically, there is a renaming
-- issue: pg_stop_backup PRE PG15 vs pg_backup_stop PG15+
-- The alternative output can be deleted when we drop support for PG14
--
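For context on the rename this file works around: PG15 removed the exclusive backup API, so pg_stop_backup() no longer exists and its replacement pg_backup_stop() returns a record (lsn, labelfile, spcmapfile) rather than a pg_lsn. A minimal sketch of the version guard used later in this file; note that both calls only succeed while a backup started with pg_backup_start()/pg_start_backup() is in progress:

\if :server_version_ge_15
SELECT pg_catalog.pg_backup_stop() > cast(NULL AS record);   -- PG15+: record-returning replacement
\else
SELECT pg_catalog.pg_stop_backup() > cast(NULL AS pg_lsn);   -- PG14 and earlier: returns pg_lsn
\endif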
CREATE SCHEMA issue_5248;
SET search_path TO issue_5248;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 3013000;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
create table countries(
id serial primary key
, name text
@ -209,8 +220,12 @@ FROM (
(
SELECT utc_offset
FROM pg_catalog.pg_timezone_names limit 1 offset 4) limit 91) AS subq_3
WHERE pg_catalog.pg_stop_backup() > cast(NULL AS pg_lsn) limit 100;
\if :server_version_ge_15
WHERE pg_catalog.pg_backup_stop() > cast(NULL AS record) limit 100;
ERROR: cannot push down subquery on the target list
DETAIL: Subqueries in the SELECT part of the query can only be pushed down if they happen before aggregates and window functions
\else
WHERE pg_catalog.pg_stop_backup() > cast(NULL AS pg_lsn) limit 100;
\endif
SET client_min_messages TO WARNING;
DROP SCHEMA issue_5248 CASCADE;
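Nothing Citus-specific drives the branching above: \gset stores the columns of the preceding single-row result into psql variables, and \if/\else/\endif then branch on a boolean-valued variable. A self-contained sketch of the same pattern, using the variable name from this test:

SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\echo server is PG15 or newer
\else
\echo server is PG14 or older
\endif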
@ -0,0 +1,231 @@
--
-- ISSUE_5248
--
-- This test file has an alternative output because of the change in the
-- backup modes of Postgres. Specifically, there is a renaming
-- issue: pg_stop_backup PRE PG15 vs pg_backup_stop PG15+
-- The alternative output can be deleted when we drop support for PG14
--
CREATE SCHEMA issue_5248;
SET search_path TO issue_5248;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 3013000;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
create table countries(
|
||||
id serial primary key
|
||||
, name text
|
||||
, code varchar(2) collate "C" unique
|
||||
);
|
||||
insert into countries(name, code) select 'country-'||i, i::text from generate_series(10,99) i;
|
||||
select create_reference_table('countries');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$issue_5248.countries$$)
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
create table orgs (
|
||||
id bigserial primary key
|
||||
, name text
|
||||
, created_at timestamptz default now()
|
||||
);
|
||||
select create_distributed_table('orgs', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
create table users (
|
||||
id bigserial
|
||||
, org_id bigint references orgs(id)
|
||||
, name text
|
||||
, created_at timestamptz default now()
|
||||
, country_id int -- references countries(id)
|
||||
, score bigint generated always as (id + country_id) stored
|
||||
, primary key (org_id, id)
|
||||
);
|
||||
select create_distributed_table('users', 'org_id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
alter table users add constraint fk_user_country foreign key (country_id) references countries(id);
|
||||
create table orders (
|
||||
id bigserial
|
||||
, org_id bigint references orgs(id)
|
||||
, user_id bigint
|
||||
, price int
|
||||
, info jsonb
|
||||
, primary key (org_id, id)
|
||||
, foreign key (org_id, user_id) references users(org_id, id)
|
||||
);
|
||||
select create_distributed_table('orders', 'org_id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
create table events (
|
||||
id bigserial not null
|
||||
, user_id bigint not null
|
||||
, org_id bigint not null
|
||||
, event_time timestamp not null default now()
|
||||
, event_type int not null default 0
|
||||
, payload jsonb
|
||||
, primary key (user_id, id)
|
||||
);
|
||||
create index event_time_idx on events using BRIN (event_time);
|
||||
create index event_json_idx on events using gin(payload);
|
||||
select create_distributed_table('events', 'user_id'); -- on purpose don't colocate on correctly on org_id
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
create table local_data(
|
||||
id bigserial primary key
|
||||
, val int default ( (random()*100)::int )
|
||||
);
|
||||
insert into orgs(id, name) select i,'org-'||i from generate_series(1,10) i;
|
||||
insert into users(id, name, org_id, country_id) select i,'user-'||i, i+1, (i%90)+1 from generate_series(1,5) i;
|
||||
insert into orders(id, org_id, user_id, price) select i, ((i+1))+1 , i+1, i/100 from generate_series(1,2) i;
|
||||
insert into events(id, org_id, user_id, event_type) select i, ((i+1))+1 , i+1, i/100 from generate_series(1,10) i;
|
||||
insert into local_data(id) select generate_series(1,10);
|
||||
/*
|
||||
* Test that we don't get a crash. See #5248.
|
||||
*/
|
||||
SELECT subq_3.c15 AS c0,
|
||||
subq_3.c0 AS c1,
|
||||
subq_3.c15 AS c2,
|
||||
subq_0.c1 AS c3,
|
||||
pg_catalog.String_agg( Cast(
|
||||
(
|
||||
SELECT tgargs
|
||||
FROM pg_catalog.pg_trigger limit 1 offset 1) AS BYTEA), Cast(
|
||||
(
|
||||
SELECT minimum_value
|
||||
FROM columnar.chunk limit 1 offset 5) AS BYTEA)) OVER (partition BY subq_3.c10 ORDER BY subq_3.c12,subq_0.c2) AS c4,
|
||||
subq_0.c1 AS c5
|
||||
FROM (
|
||||
SELECT ref_1.address AS c0,
|
||||
ref_1.error AS c1,
|
||||
sample_0.NAME AS c2,
|
||||
sample_2.trftosql AS c3
|
||||
FROM pg_catalog.pg_statio_all_sequences AS ref_0
|
||||
INNER JOIN pg_catalog.pg_hba_file_rules AS ref_1
|
||||
ON ((
|
||||
SELECT pg_catalog.Max(aggnumdirectargs)
|
||||
FROM pg_catalog.pg_aggregate) <= ref_0.blks_hit)
|
||||
INNER JOIN countries AS sample_0 TABLESAMPLE system (6.4)
|
||||
INNER JOIN local_data AS sample_1 TABLESAMPLE bernoulli (8)
|
||||
ON ((
|
||||
true)
|
||||
OR (
|
||||
sample_0.NAME IS NOT NULL))
|
||||
INNER JOIN pg_catalog.pg_transform AS sample_2 TABLESAMPLE bernoulli (1.2)
|
||||
INNER JOIN pg_catalog.pg_language AS ref_2
|
||||
ON ((
|
||||
SELECT shard_cost_function
|
||||
FROM pg_catalog.pg_dist_rebalance_strategy limit 1 offset 1) IS NULL)
|
||||
RIGHT JOIN pg_catalog.pg_index AS sample_3 TABLESAMPLE system (0.3)
|
||||
ON ((
|
||||
cast(NULL AS bpchar) ~<=~ cast(NULL AS bpchar))
|
||||
OR ((
|
||||
EXISTS
|
||||
(
|
||||
SELECT sample_3.indnkeyatts AS c0,
|
||||
sample_2.trflang AS c1,
|
||||
sample_2.trftype AS c2
|
||||
FROM pg_catalog.pg_statistic_ext AS sample_4 TABLESAMPLE bernoulli (8.6)
|
||||
WHERE sample_2.trftype IS NOT NULL))
|
||||
AND (
|
||||
false)))
|
||||
ON (
|
||||
EXISTS
|
||||
(
|
||||
SELECT sample_0.id AS c0,
|
||||
sample_3.indisprimary AS c1
|
||||
FROM orgs AS sample_5 TABLESAMPLE system (5.3)
|
||||
WHERE false))
|
||||
ON (
|
||||
cast(NULL AS float8) >
|
||||
(
|
||||
SELECT pg_catalog.avg(enumsortorder)
|
||||
FROM pg_catalog.pg_enum) )
|
||||
WHERE cast(COALESCE(
|
||||
CASE
|
||||
WHEN ref_1.auth_method ~>=~ ref_1.auth_method THEN cast(NULL AS path)
|
||||
ELSE cast(NULL AS path)
|
||||
END , cast(NULL AS path)) AS path) = cast(NULL AS path)) AS subq_0,
|
||||
lateral
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
SELECT pg_catalog.stddev(total_time)
|
||||
FROM pg_catalog.pg_stat_user_functions) AS c0,
|
||||
subq_0.c1 AS c1,
|
||||
subq_2.c0 AS c2,
|
||||
subq_0.c2 AS c3,
|
||||
subq_0.c0 AS c4,
|
||||
cast(COALESCE(subq_2.c0, subq_2.c0) AS text) AS c5,
|
||||
subq_2.c0 AS c6,
|
||||
subq_2.c1 AS c7,
|
||||
subq_2.c1 AS c8,
|
||||
subq_2.c1 AS c9,
|
||||
subq_0.c3 AS c10,
|
||||
pg_catalog.pg_stat_get_db_temp_files( cast(
|
||||
(
|
||||
SELECT objoid
|
||||
FROM pg_catalog.pg_description limit 1 offset 1) AS oid)) AS c11,
|
||||
subq_0.c3 AS c12,
|
||||
subq_2.c1 AS c13,
|
||||
subq_0.c0 AS c14,
|
||||
subq_0.c3 AS c15,
|
||||
subq_0.c3 AS c16,
|
||||
subq_0.c1 AS c17,
|
||||
subq_0.c2 AS c18
|
||||
FROM (
|
||||
SELECT subq_1.c2 AS c0,
|
||||
subq_0.c3 AS c1
|
||||
FROM information_schema.element_types AS ref_3,
|
||||
lateral
|
||||
(
|
||||
SELECT subq_0.c1 AS c0,
|
||||
sample_6.info AS c1,
|
||||
subq_0.c2 AS c2,
|
||||
subq_0.c3 AS c3,
|
||||
ref_3.domain_default AS c4,
|
||||
sample_6.user_id AS c5,
|
||||
ref_3.collation_name AS c6
|
||||
FROM orders AS sample_6 TABLESAMPLE system (3.8)
|
||||
WHERE sample_6.price = sample_6.org_id limit 58) AS subq_1
|
||||
WHERE (
|
||||
subq_1.c2 <= subq_0.c2)
|
||||
AND (
|
||||
cast(NULL AS line) ?-| cast(NULL AS line)) limit 59) AS subq_2
|
||||
WHERE cast(COALESCE(pg_catalog.age( cast(
|
||||
(
|
||||
SELECT pg_catalog.max(event_time)
|
||||
FROM events) AS "timestamp")),
|
||||
(
|
||||
SELECT write_lag
|
||||
FROM pg_catalog.pg_stat_replication limit 1 offset 3) ) AS "interval") >
|
||||
(
|
||||
SELECT utc_offset
|
||||
FROM pg_catalog.pg_timezone_names limit 1 offset 4) limit 91) AS subq_3
\if :server_version_ge_15
WHERE pg_catalog.pg_backup_stop() > cast(NULL AS record) limit 100;
\else
WHERE pg_catalog.pg_stop_backup() > cast(NULL AS pg_lsn) limit 100;
ERROR: cannot push down subquery on the target list
DETAIL: Subqueries in the SELECT part of the query can only be pushed down if they happen before aggregates and window functions
\endif
SET client_min_messages TO WARNING;
DROP SCHEMA issue_5248 CASCADE;
@ -1,3 +1,17 @@
--
-- LOCAL_SHARD_EXECUTION
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
t
(1 row)

CREATE SCHEMA local_shard_execution;
SET search_path TO local_shard_execution;
SET citus.shard_count TO 4;
@ -288,7 +302,7 @@ RETURNING *;
|
|||
INSERT INTO distributed_table SELECT * FROM distributed_table WHERE key = 1 OFFSET 0 ON CONFLICT DO NOTHING;
|
||||
NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) OFFSET 0
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) SELECT key, value, age FROM read_intermediate_result('insert_select_XXX_1470001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint) ON CONFLICT DO NOTHING
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.age FROM read_intermediate_result('insert_select_XXX_1470001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint) ON CONFLICT DO NOTHING
|
||||
INSERT INTO distributed_table SELECT 1, '1',15 FROM distributed_table WHERE key = 2 LIMIT 1 ON CONFLICT DO NOTHING;
|
||||
-- sanity check: multi-shard INSERT..SELECT pushdown goes through distributed execution
|
||||
INSERT INTO distributed_table SELECT * FROM distributed_table ON CONFLICT DO NOTHING;
|
||||
|
@ -800,7 +814,7 @@ NOTICE: executing the copy locally for shard xxxxx
|
|||
INSERT INTO distributed_table (key) SELECT -key FROM distributed_table;
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_1470001_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_1470001_to','SELECT (OPERATOR(pg_catalog.-) key) AS key FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_1470003_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_1470003_to','SELECT (OPERATOR(pg_catalog.-) key) AS key FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key) SELECT key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1470003_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer)
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key) SELECT intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1470003_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer)
|
||||
SELECT count(*) FROM distributed_table WHERE key = -6;
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) '-6'::integer)
|
||||
count
|
||||
File diff suppressed because it is too large
@ -1,3 +1,17 @@
--
-- LOCAL_SHARD_EXECUTION_REPLICATED
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
t
(1 row)

CREATE SCHEMA local_shard_execution_replicated;
SET search_path TO local_shard_execution_replicated;
SET citus.shard_count TO 4;
@ -225,7 +239,7 @@ RETURNING *;
|
|||
INSERT INTO distributed_table SELECT * FROM distributed_table WHERE key = 1 OFFSET 0 ON CONFLICT DO NOTHING;
|
||||
NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) OFFSET 0
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key, value, age) SELECT key, value, age FROM read_intermediate_result('insert_select_XXX_1500001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint) ON CONFLICT DO NOTHING
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key, value, age) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.age FROM read_intermediate_result('insert_select_XXX_1500001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint) ON CONFLICT DO NOTHING
|
||||
INSERT INTO distributed_table SELECT 1, '1',15 FROM distributed_table WHERE key = 2 LIMIT 1 ON CONFLICT DO NOTHING;
|
||||
NOTICE: executing the command locally: SELECT 1 AS key, '1'::text AS value, int8(15) AS age FROM local_shard_execution_replicated.distributed_table_1500004 distributed_table WHERE (key OPERATOR(pg_catalog.=) 2) LIMIT 1
|
||||
-- sanity check: multi-shard INSERT..SELECT pushdown goes through distributed execution
|
||||
|
@ -764,8 +778,8 @@ NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_r
|
|||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_1500002_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_1500002_to','SELECT (OPERATOR(pg_catalog.-) key) AS key FROM local_shard_execution_replicated.distributed_table_1500002 distributed_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_1500003_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_1500003_to','SELECT (OPERATOR(pg_catalog.-) key) AS key FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_1500004_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_1500004_to','SELECT (OPERATOR(pg_catalog.-) key) AS key FROM local_shard_execution_replicated.distributed_table_1500004 distributed_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key) SELECT key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1500003_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer)
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500004 AS citus_table_alias (key) SELECT key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1500004_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer)
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key) SELECT intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1500003_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer)
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500004 AS citus_table_alias (key) SELECT intermediate_result.key FROM read_intermediate_results('{repartitioned_results_xxxxx_from_1500004_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(key integer)
|
||||
SELECT count(*) FROM distributed_table WHERE key = -6;
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE (key OPERATOR(pg_catalog.=) '-6'::integer)
|
||||
count
|
||||
File diff suppressed because it is too large
@ -1,6 +1,17 @@
--
-- MULTI_DEPARSE_SHARD_QUERY
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
t
(1 row)

SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;
CREATE FUNCTION deparse_shard_query_test(text)
@ -63,7 +74,7 @@ SELECT deparse_shard_query_test('
|
|||
INSERT INTO raw_events_1
|
||||
SELECT * FROM raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM public.raw_events_1
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_1, raw_events_1_1.value_2, raw_events_1_1.value_3, raw_events_1_1.value_4, raw_events_1_1.value_5, raw_events_1_1.value_6, raw_events_1_1.value_7, raw_events_1_1.event_at FROM public.raw_events_1 raw_events_1_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -76,7 +87,7 @@ SELECT
|
|||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -90,7 +101,7 @@ SELECT
|
|||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_1_1.tenant_id, (raw_events_1_1.value_5)::integer AS value_5, raw_events_1_1.value_4, (raw_events_1_1.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -104,7 +115,7 @@ SELECT
|
|||
FROM
|
||||
raw_events_2;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_2.tenant_id, (raw_events_2.value_5)::integer AS value_5, raw_events_2.value_4, (raw_events_2.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -120,7 +131,7 @@ FROM
|
|||
GROUP BY
|
||||
tenant_id, date_trunc(\'hour\', event_at)
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT raw_events_1.tenant_id, sum(raw_events_1.value_1) AS sum, avg(raw_events_1.value_3) AS avg, sum(raw_events_1.value_4) AS sum, avg(raw_events_1.value_6) AS avg, date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY raw_events_1.tenant_id, (date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone))
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -137,7 +148,7 @@ FROM
|
|||
WHERE
|
||||
raw_events_1.tenant_id = raw_events_2.tenant_id;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1, public.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -153,7 +164,7 @@ FROM
|
|||
WHERE
|
||||
raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1, public.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1_1.event_at
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -190,7 +201,7 @@ FROM
|
|||
GROUP BY
|
||||
event_at, tenant_id;
|
||||
');
|
||||
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM public.raw_events_1 GROUP BY event_at, tenant_id
|
||||
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum, raw_events_1.event_at FROM public.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -206,7 +217,7 @@ FROM
|
|||
GROUP BY
|
||||
event_at, tenant_id;
|
||||
');
|
||||
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY event_at, tenant_id
|
||||
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -225,7 +236,7 @@ WITH RECURSIVE hierarchy as (
|
|||
h.value_1 = re.value_6))
|
||||
SELECT * FROM hierarchy WHERE LEVEL <= 2;
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2)
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT hierarchy.tenant_id, hierarchy.value_1, hierarchy.level FROM hierarchy WHERE (hierarchy.level OPERATOR(pg_catalog.<=) 2)
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -238,7 +249,7 @@ SELECT
|
|||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM public.raw_events_1
|
||||
INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT raw_events_1.value_1 FROM public.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -251,7 +262,7 @@ SELECT value_3, value_2, tenant_id
|
|||
FROM raw_events_1
|
||||
WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000);
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM public.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT raw_events_1.tenant_id, raw_events_1.value_2, raw_events_1.value_3 FROM public.raw_events_1 WHERE (((raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) AND ((raw_events_1.value_6 OPERATOR(pg_catalog.<) 3000) OR (raw_events_1.value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -263,7 +274,7 @@ SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id
|
|||
FROM raw_events_1
|
||||
WHERE event_at = now();
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now())
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, rank() OVER (PARTITION BY raw_events_1.tenant_id ORDER BY raw_events_1.value_6) AS rank FROM public.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now())
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -276,7 +287,7 @@ SELECT random(), int4eq(1, max(value_1))::int, value_6
|
|||
WHERE event_at = now()
|
||||
GROUP BY event_at, value_7, value_6;
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(raw_events_1.value_1)))::integer AS int4eq, raw_events_1.value_6, random() AS random FROM public.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now()) GROUP BY raw_events_1.event_at, raw_events_1.value_7, raw_events_1.value_6
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -297,7 +308,7 @@ SELECT
|
|||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM public.raw_events_1
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(raw_events_1.tenant_id) AS max, count(DISTINCT CASE WHEN (raw_events_1.value_1 OPERATOR(pg_catalog.>) 100) THEN raw_events_1.tenant_id ELSE (raw_events_1.value_6)::bigint END) AS c FROM public.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -314,7 +325,7 @@ FROM
|
|||
raw_events_2
|
||||
) as foo
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT foo.tenant_id, foo.value_1, 10 AS value_6, foo.value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -335,7 +346,7 @@ FROM
|
|||
GROUP BY
|
||||
tenant_id, date_trunc(\'hour\', event_at)
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT foo.tenant_id, sum(foo.value_1) AS sum, sum((foo.value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY foo.tenant_id, (date_trunc('hour'::text, (foo.event_at)::timestamp with time zone))
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -352,7 +363,7 @@ FROM
|
|||
raw_events_1
|
||||
) as foo
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
|
||||
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.tenant_id, foo.value_1, foo.value_2, foo.value_3, foo.value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -369,7 +380,7 @@ FROM
|
|||
raw_events_1
|
||||
) as foo
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
|
||||
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.value_2, foo.value_4, foo.value_1, foo.value_3, foo.tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -385,7 +396,7 @@ FROM
|
|||
ORDER BY
|
||||
value_2, value_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 ORDER BY value_2, value_1
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_7, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1 ORDER BY raw_events_1_1.value_2, raw_events_1_1.value_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -400,7 +411,7 @@ SELECT
|
|||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
|
|
@@ -0,0 +1,419 @@
|
|||
--
|
||||
-- MULTI_DEPARSE_SHARD_QUERY
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
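-- For illustration only (not part of the test input): the visible difference
-- from the PG15 output is alias qualification in the deparsed queries, e.g.
-- PG14 prints subquery columns unqualified,
--   SELECT tenant_id, sum(value_1) AS sum FROM (...) foo
-- while PG15 qualifies them with the subquery alias,
--   SELECT foo.tenant_id, sum(foo.value_1) AS sum FROM (...) foo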
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
SET citus.next_shard_id TO 13100000;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE FUNCTION deparse_shard_query_test(text)
|
||||
RETURNS VOID
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
-- create the first table
|
||||
CREATE TABLE raw_events_1
|
||||
(tenant_id bigint,
|
||||
value_1 int,
|
||||
value_2 int,
|
||||
value_3 float,
|
||||
value_4 bigint,
|
||||
value_5 text,
|
||||
value_6 int DEFAULT 10,
|
||||
value_7 int,
|
||||
event_at date DEFAULT now()
|
||||
);
|
||||
SELECT create_distributed_table('raw_events_1', 'tenant_id', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- create the second table
|
||||
CREATE TABLE raw_events_2
|
||||
(tenant_id bigint,
|
||||
value_1 int,
|
||||
value_2 int,
|
||||
value_3 float,
|
||||
value_4 bigint,
|
||||
value_5 text,
|
||||
value_6 float DEFAULT (random()*100)::float,
|
||||
value_7 int,
|
||||
event_at date DEFAULT now()
|
||||
);
|
||||
SELECT create_distributed_table('raw_events_2', 'tenant_id', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE aggregated_events
|
||||
(tenant_id bigint,
|
||||
sum_value_1 bigint,
|
||||
average_value_2 float,
|
||||
average_value_3 float,
|
||||
sum_value_4 bigint,
|
||||
sum_value_5 float,
|
||||
average_value_6 int,
|
||||
rollup_hour date);
|
||||
SELECT create_distributed_table('aggregated_events', 'tenant_id', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- start with very simple examples on a single table
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1
|
||||
SELECT * FROM raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM public.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(tenant_id, value_4)
|
||||
SELECT
|
||||
tenant_id, value_4
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- now shuffle the columns a bit on a single table
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(value_5, value_2, tenant_id, value_4)
|
||||
SELECT
|
||||
value_2::text, value_5::int, tenant_id, value_4
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- same test on two different tables
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(value_5, value_2, tenant_id, value_4)
|
||||
SELECT
|
||||
value_2::text, value_5::int, tenant_id, value_4
|
||||
FROM
|
||||
raw_events_2;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- let's do some simple aggregations
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events (tenant_id, rollup_hour, sum_value_1, average_value_3, average_value_6, sum_value_4)
|
||||
SELECT
|
||||
tenant_id, date_trunc(\'hour\', event_at) , sum(value_1), avg(value_3), avg(value_6), sum(value_4)
|
||||
FROM
|
||||
raw_events_1
|
||||
GROUP BY
|
||||
tenant_id, date_trunc(\'hour\', event_at)
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- also some subqueries and JOINs with complicated target lists
|
||||
-- a simple JOIN
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1 (value_3, tenant_id)
|
||||
SELECT
|
||||
raw_events_2.value_3, raw_events_1.tenant_id
|
||||
FROM
|
||||
raw_events_1, raw_events_2
|
||||
WHERE
|
||||
raw_events_1.tenant_id = raw_events_2.tenant_id;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- join with group by
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1 (value_3, tenant_id)
|
||||
SELECT
|
||||
max(raw_events_2.value_3), avg(raw_events_1.value_3)
|
||||
FROM
|
||||
raw_events_1, raw_events_2
|
||||
WHERE
|
||||
raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- a more complicated JOIN
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO aggregated_events (sum_value_4, tenant_id)
|
||||
SELECT
|
||||
max(r1.value_4), r3.tenant_id
|
||||
FROM
|
||||
raw_events_1 r1, raw_events_2 r2, raw_events_1 r3
|
||||
WHERE
|
||||
r1.tenant_id = r2.tenant_id AND r2.tenant_id = r3.tenant_id
|
||||
GROUP BY
|
||||
r1.value_1, r3.tenant_id, r2.event_at
|
||||
ORDER BY
|
||||
r2.event_at DESC;
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM public.raw_events_1 r1, public.raw_events_2 r2, public.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- queries with CTEs are supported
|
||||
SELECT deparse_shard_query_test('
|
||||
WITH first_tenant AS (SELECT event_at, value_5, tenant_id FROM raw_events_1)
|
||||
INSERT INTO aggregated_events (rollup_hour, sum_value_5, tenant_id)
|
||||
SELECT
|
||||
event_at, sum(value_5::int), tenant_id
|
||||
FROM
|
||||
raw_events_1
|
||||
GROUP BY
|
||||
event_at, tenant_id;
|
||||
');
|
||||
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM public.raw_events_1 GROUP BY event_at, tenant_id
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
WITH first_tenant AS (SELECT event_at, value_5, tenant_id FROM raw_events_1)
|
||||
INSERT INTO aggregated_events (sum_value_5, tenant_id)
|
||||
SELECT
|
||||
sum(value_5::int), tenant_id
|
||||
FROM
|
||||
raw_events_1
|
||||
GROUP BY
|
||||
event_at, tenant_id;
|
||||
');
|
||||
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY event_at, tenant_id
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO aggregated_events (sum_value_1, sum_value_5, tenant_id)
|
||||
WITH RECURSIVE hierarchy as (
|
||||
SELECT value_1, 1 AS LEVEL, tenant_id
|
||||
FROM raw_events_1
|
||||
WHERE tenant_id = 1
|
||||
UNION
|
||||
SELECT re.value_2, (h.level+1), re.tenant_id
|
||||
FROM hierarchy h JOIN raw_events_1 re
|
||||
ON (h.tenant_id = re.tenant_id AND
|
||||
h.value_1 = re.value_6))
|
||||
SELECT * FROM hierarchy WHERE LEVEL <= 2;
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2)
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO aggregated_events (sum_value_1)
|
||||
SELECT
|
||||
DISTINCT value_1
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM public.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- many filters shuffled
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events (sum_value_5, sum_value_1, tenant_id)
|
||||
SELECT value_3, value_2, tenant_id
|
||||
FROM raw_events_1
|
||||
WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000);
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM public.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events (sum_value_5, tenant_id)
|
||||
SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id
|
||||
FROM raw_events_1
|
||||
WHERE event_at = now();
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now())
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events (sum_value_5, tenant_id, sum_value_4)
|
||||
SELECT random(), int4eq(1, max(value_1))::int, value_6
|
||||
FROM raw_events_1
|
||||
WHERE event_at = now()
|
||||
GROUP BY event_at, value_7, value_6;
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO aggregated_events (sum_value_1, tenant_id)
|
||||
SELECT
|
||||
count(DISTINCT CASE
|
||||
WHEN
|
||||
value_1 > 100
|
||||
THEN
|
||||
tenant_id
|
||||
ELSE
|
||||
value_6
|
||||
END) as c,
|
||||
max(tenant_id)
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM public.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(value_7, value_1, tenant_id)
|
||||
SELECT
|
||||
value_7, value_1, tenant_id
|
||||
FROM
|
||||
(SELECT
|
||||
tenant_id, value_2 as value_7, value_1
|
||||
FROM
|
||||
raw_events_2
|
||||
) as foo
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events(sum_value_1, tenant_id, sum_value_5)
|
||||
SELECT
|
||||
sum(value_1), tenant_id, sum(value_5::bigint)
|
||||
FROM
|
||||
(SELECT
|
||||
raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1
|
||||
FROM
|
||||
raw_events_2, raw_events_1
|
||||
WHERE
|
||||
raw_events_1.tenant_id = raw_events_2.tenant_id
|
||||
) as foo
|
||||
GROUP BY
|
||||
tenant_id, date_trunc(\'hour\', event_at)
|
||||
');
|
||||
INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO raw_events_2(tenant_id, value_1, value_2, value_3, value_4)
|
||||
SELECT
|
||||
tenant_id, value_1, value_2, value_3, value_4
|
||||
FROM
|
||||
(SELECT
|
||||
value_2, value_4, tenant_id, value_1, value_3
|
||||
FROM
|
||||
raw_events_1
|
||||
) as foo
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO raw_events_2(tenant_id, value_1, value_4, value_2, value_3)
|
||||
SELECT
|
||||
*
|
||||
FROM
|
||||
(SELECT
|
||||
value_2, value_4, tenant_id, value_1, value_3
|
||||
FROM
|
||||
raw_events_1
|
||||
) as foo
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- use a column multiple times
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(tenant_id, value_7, value_4)
|
||||
SELECT
|
||||
tenant_id, value_7, value_7
|
||||
FROM
|
||||
raw_events_1
|
||||
ORDER BY
|
||||
value_2, value_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 ORDER BY value_2, value_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- test a table with a dropped column as well
|
||||
ALTER TABLE raw_events_1 DROP COLUMN value_5;
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(tenant_id, value_7, value_4)
|
||||
SELECT
|
||||
tenant_id, value_7, value_4
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
|
@@ -301,12 +301,14 @@ Sort
|
|||
Group Key: l_quantity
|
||||
-> Seq Scan on lineitem_360000 lineitem
|
||||
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
|
||||
SELECT public.plan_normalize_memory($Q$
|
||||
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
|
||||
SELECT l_quantity, count(*) count_quantity FROM lineitem
|
||||
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
||||
$Q$);
|
||||
Sort (actual rows=50 loops=1)
|
||||
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
|
||||
Sort Method: quicksort Memory: 27kB
|
||||
Sort Method: quicksort Memory: xxx
|
||||
-> HashAggregate (actual rows=50 loops=1)
|
||||
Group Key: remote_scan.l_quantity
|
||||
-> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
|
||||
|
@@ -369,13 +371,15 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
|
|||
END;
|
||||
DROP TABLE t1, t2;
|
||||
-- Test query text output, with ANALYZE ON
|
||||
SELECT public.plan_normalize_memory($Q$
|
||||
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
|
||||
SELECT l_quantity, count(*) count_quantity FROM lineitem
|
||||
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
||||
$Q$);
|
||||
Sort (actual rows=50 loops=1)
|
||||
Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
|
||||
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
|
||||
Sort Method: quicksort Memory: 27kB
|
||||
Sort Method: quicksort Memory: xxx
|
||||
-> HashAggregate (actual rows=50 loops=1)
|
||||
Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
|
||||
Group Key: remote_scan.l_quantity
|
||||
|
@@ -632,6 +636,21 @@ Aggregate
|
|||
-> Seq Scan on events_1400285 events
|
||||
Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[]))
|
||||
-- Union and left join subquery pushdown
|
||||
-- enable_group_by_reordering is a new GUC introduced in PG15
|
||||
-- it optimizes the order of GROUP BY keys, which results
|
||||
-- in different EXPLAIN output between PG13/14 and PG15.
|
||||
-- Hence we set that GUC to off.
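-- For illustration only (hypothetical table t with rows sorted on (a, b)):
-- with the GUC enabled, the planner may evaluate
--   SELECT a, b, count(*) FROM t GROUP BY b, a;
-- as if it were written GROUP BY a, b, so the Group Key order reported by
-- EXPLAIN can differ between versions unless the GUC is turned off.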
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
SET enable_group_by_reordering TO off;
|
||||
\endif
|
||||
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM SET enable_group_by_reordering TO off;$$);
|
||||
1
|
||||
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
|
||||
(localhost,57637,t,t)
|
||||
(localhost,57638,t,t)
|
||||
EXPLAIN (COSTS OFF)
|
||||
SELECT
|
||||
avg(array_length(events, 1)) AS event_average,
|
||||
|
@@ -854,6 +873,14 @@ Sort
|
|||
Sort Key: events_2.composite_id
|
||||
-> Seq Scan on events_1400285 events_2
|
||||
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
|
||||
\if :server_version_ge_15
|
||||
RESET enable_group_by_reordering;
|
||||
\endif
|
||||
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM RESET enable_group_by_reordering;$$);
|
||||
1
|
||||
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
|
||||
(localhost,57637,t,t)
|
||||
(localhost,57638,t,t)
|
||||
-- Lateral join subquery pushdown
|
||||
-- set subquery_pushdown due to the LIMIT in the query
|
||||
SET citus.subquery_pushdown to ON;
|
||||
|
@@ -1023,12 +1050,14 @@ Custom Scan (Citus Adaptive)
|
|||
-> Delete on lineitem_hash_part_360044 lineitem_hash_part
|
||||
-> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
|
||||
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
|
||||
SELECT public.plan_normalize_memory($Q$
|
||||
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
|
||||
SELECT l_quantity, count(*) count_quantity FROM lineitem
|
||||
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
||||
$Q$);
|
||||
Sort (actual rows=50 loops=1)
|
||||
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
|
||||
Sort Method: quicksort Memory: 27kB
|
||||
Sort Method: quicksort Memory: xxx
|
||||
-> HashAggregate (actual rows=50 loops=1)
|
||||
Group Key: remote_scan.l_quantity
|
||||
-> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
|
||||
|
|
|
@@ -724,6 +724,14 @@ SELECT * FROM multi_extension.print_extension_changes();
|
|||
|
||||
-- recreate public schema, and recreate citus_tables in the public schema by default
|
||||
CREATE SCHEMA public;
|
||||
-- In PG15, the public schema is owned by the pg_database_owner role
|
||||
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
ALTER SCHEMA public OWNER TO pg_database_owner;
|
||||
\endif
|
||||
GRANT ALL ON SCHEMA public TO public;
|
||||
ALTER EXTENSION citus UPDATE TO '9.5-1';
|
||||
ALTER EXTENSION citus UPDATE TO '10.0-4';
|
||||
|
|
|
@@ -1,6 +1,17 @@
|
|||
--
|
||||
-- MULTI_INSERT_SELECT
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SET citus.next_shard_id TO 13300000;
|
||||
SET citus.next_placement_id TO 13300000;
|
||||
-- create co-located tables
|
||||
|
@@ -63,10 +74,10 @@ INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4)
|
|||
SET client_min_messages TO DEBUG2;
|
||||
-- raw table to raw table
|
||||
INSERT INTO raw_events_second SELECT * FROM raw_events_first;
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300000 raw_events_first WHERE (user_id IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300001 raw_events_first WHERE (user_id IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300002 raw_events_first WHERE (user_id IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300003 raw_events_first WHERE (user_id IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_first.user_id, raw_events_first."time", raw_events_first.value_1, raw_events_first.value_2, raw_events_first.value_3, raw_events_first.value_4 FROM public.raw_events_first_13300000 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_first.user_id, raw_events_first."time", raw_events_first.value_1, raw_events_first.value_2, raw_events_first.value_3, raw_events_first.value_4 FROM public.raw_events_first_13300001 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_first.user_id, raw_events_first."time", raw_events_first.value_1, raw_events_first.value_2, raw_events_first.value_3, raw_events_first.value_4 FROM public.raw_events_first_13300002 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_first.user_id, raw_events_first."time", raw_events_first.value_1, raw_events_first.value_2, raw_events_first.value_3, raw_events_first.value_4 FROM public.raw_events_first_13300003 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
|
||||
-- see that our first multi shard INSERT...SELECT works as expected
|
||||
SET client_min_messages TO INFO;
|
||||
SELECT
|
||||
|
@@ -152,7 +163,7 @@ INSERT INTO raw_events_first (user_id, time) VALUES
|
|||
SET client_min_messages TO DEBUG2;
|
||||
INSERT INTO raw_events_second (user_id, time) SELECT user_id, time FROM raw_events_first WHERE user_id = 7;
|
||||
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, "time") SELECT user_id, "time" FROM public.raw_events_first_13300001 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) 7) AND (user_id IS NOT NULL))
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, "time") SELECT raw_events_first.user_id, raw_events_first."time" FROM public.raw_events_first_13300001 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) 7) AND (raw_events_first.user_id IS NOT NULL))
|
||||
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
|
||||
DEBUG: Skipping target shard interval 13300007 since SELECT query for it pruned away
|
||||
SET client_min_messages TO INFO;
|
||||
|
@@ -168,7 +179,7 @@ FROM
|
|||
raw_events_first
|
||||
WHERE
|
||||
user_id = 8;
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) 8) AND (user_id IS NOT NULL))
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_first.user_id, raw_events_first."time", raw_events_first.value_1, raw_events_first.value_2, raw_events_first.value_3, raw_events_first.value_4 FROM public.raw_events_first_13300000 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) 8) AND (raw_events_first.user_id IS NOT NULL))
|
||||
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
|
||||
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
|
||||
DEBUG: Skipping target shard interval 13300007 since SELECT query for it pruned away
|
||||
|
@@ -210,10 +221,10 @@ FROM
|
|||
WHERE
|
||||
value_3 = 9000
|
||||
RETURNING *;
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300000 raw_events_first WHERE ((value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND (user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300001 raw_events_first WHERE ((value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND (user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300002 raw_events_first WHERE ((value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND (user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE ((value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND (user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id, value_1, value_3) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first.value_3 FROM public.raw_events_first_13300000 raw_events_first WHERE ((raw_events_first.value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND (raw_events_first.user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, value_1, value_3) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first.value_3 FROM public.raw_events_first_13300001 raw_events_first WHERE ((raw_events_first.value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND (raw_events_first.user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id, value_1, value_3) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first.value_3 FROM public.raw_events_first_13300002 raw_events_first WHERE ((raw_events_first.value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND (raw_events_first.user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first.value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE ((raw_events_first.value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND (raw_events_first.user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
user_id | time | value_1 | value_2 | value_3 | value_4
|
||||
---------------------------------------------------------------------
|
||||
9 | | 90 | | 9000 |
|
||||
|
@@ -230,9 +241,9 @@ WHERE
|
|||
user_id = 9 OR user_id = 16
|
||||
RETURNING *;
|
||||
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300001 raw_events_first WHERE (((user_id OPERATOR(pg_catalog.=) 9) OR (user_id OPERATOR(pg_catalog.=) 16)) AND (user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, value_1, value_3) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first.value_3 FROM public.raw_events_first_13300001 raw_events_first WHERE (((raw_events_first.user_id OPERATOR(pg_catalog.=) 9) OR (raw_events_first.user_id OPERATOR(pg_catalog.=) 16)) AND (raw_events_first.user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE (((user_id OPERATOR(pg_catalog.=) 9) OR (user_id OPERATOR(pg_catalog.=) 16)) AND (user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first.value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE (((raw_events_first.user_id OPERATOR(pg_catalog.=) 9) OR (raw_events_first.user_id OPERATOR(pg_catalog.=) 16)) AND (raw_events_first.user_id IS NOT NULL)) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
|
||||
ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_xxxxxxx"
|
||||
-- now do some aggregations
|
||||
INSERT INTO agg_events
|
||||
|
@@ -242,10 +253,10 @@ FROM
|
|||
raw_events_first
|
||||
GROUP BY
|
||||
user_id;
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, avg(value_2) AS avg, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300000 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, avg(value_2) AS avg, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300001 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, avg(value_2) AS avg, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300002 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, avg(value_2) AS avg, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300003 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum, avg(raw_events_first.value_2) AS avg, sum(raw_events_first.value_3) AS sum, count(raw_events_first.value_4) AS count FROM public.raw_events_first_13300000 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum, avg(raw_events_first.value_2) AS avg, sum(raw_events_first.value_3) AS sum, count(raw_events_first.value_4) AS count FROM public.raw_events_first_13300001 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum, avg(raw_events_first.value_2) AS avg, sum(raw_events_first.value_3) AS sum, count(raw_events_first.value_4) AS count FROM public.raw_events_first_13300002 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum, avg(raw_events_first.value_2) AS avg, sum(raw_events_first.value_3) AS sum, count(raw_events_first.value_4) AS count FROM public.raw_events_first_13300003 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
|
||||
-- group by a column that does not exist in the SELECT target list
|
||||
INSERT INTO agg_events (value_3_agg, value_4_agg, value_1_agg, user_id)
|
||||
SELECT
|
||||
|
@@ -255,10 +266,10 @@ FROM
|
|||
GROUP BY
|
||||
value_2, user_id
|
||||
RETURNING *;
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300000 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300001 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300002 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300003 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum, sum(raw_events_first.value_3) AS sum, count(raw_events_first.value_4) AS count FROM public.raw_events_first_13300000 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.value_2, raw_events_first.user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum, sum(raw_events_first.value_3) AS sum, count(raw_events_first.value_4) AS count FROM public.raw_events_first_13300001 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.value_2, raw_events_first.user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum, sum(raw_events_first.value_3) AS sum, count(raw_events_first.value_4) AS count FROM public.raw_events_first_13300002 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.value_2, raw_events_first.user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum, sum(raw_events_first.value_3) AS sum, count(raw_events_first.value_4) AS count FROM public.raw_events_first_13300003 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.value_2, raw_events_first.user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time
|
||||
ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_xxxxxxx"
|
||||
-- some subquery tests
|
||||
INSERT INTO agg_events
|
||||
|
@@ -273,10 +284,10 @@ FROM (SELECT raw_events_second.user_id AS id,
|
|||
WHERE raw_events_first.user_id = raw_events_second.user_id) AS foo
|
||||
GROUP BY id
|
||||
ORDER BY id;
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE (id IS NOT NULL) GROUP BY id ORDER BY id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE (id IS NOT NULL) GROUP BY id ORDER BY id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE (id IS NOT NULL) GROUP BY id ORDER BY id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE (id IS NOT NULL) GROUP BY id ORDER BY id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT foo.id, sum(foo.value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE (foo.id IS NOT NULL) GROUP BY foo.id ORDER BY foo.id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT foo.id, sum(foo.value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE (foo.id IS NOT NULL) GROUP BY foo.id ORDER BY foo.id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT foo.id, sum(foo.value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE (foo.id IS NOT NULL) GROUP BY foo.id ORDER BY foo.id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT foo.id, sum(foo.value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id)) foo WHERE (foo.id IS NOT NULL) GROUP BY foo.id ORDER BY foo.id
|
||||
ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_xxxxxxx"
|
||||
-- subquery one more level deep
|
||||
INSERT INTO agg_events
|
||||
|
@@ -294,10 +305,10 @@ FROM (SELECT SUM(raw_events_second.value_4) AS v4,
|
|||
WHERE raw_events_first.user_id = raw_events_second.user_id
|
||||
GROUP BY raw_events_second.user_id) AS foo
|
||||
ORDER BY id;
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE (id IS NOT NULL) ORDER BY id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE (id IS NOT NULL) ORDER BY id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE (id IS NOT NULL) ORDER BY id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE (id IS NOT NULL) ORDER BY id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT foo.id, foo.v1, foo.v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE (foo.id IS NOT NULL) ORDER BY foo.id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT foo.id, foo.v1, foo.v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE (foo.id IS NOT NULL) ORDER BY foo.id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT foo.id, foo.v1, foo.v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE (foo.id IS NOT NULL) ORDER BY foo.id
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT foo.id, foo.v1, foo.v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE (foo.id IS NOT NULL) ORDER BY foo.id
|
||||
ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_xxxxxxx"
|
||||
\set VERBOSITY DEFAULT
|
||||
-- join between subqueries
|
||||
|
@@ -356,10 +367,10 @@ FROM
|
|||
ON (f.id = f2.id)) as outer_most
|
||||
GROUP BY
|
||||
outer_most.id;
|
||||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (id IS NOT NULL) GROUP BY id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (id IS NOT NULL) GROUP BY id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (id IS NOT NULL) GROUP BY id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (id IS NOT NULL) GROUP BY id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (outer_most.id IS NOT NULL) GROUP BY outer_most.id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (outer_most.id IS NOT NULL) GROUP BY outer_most.id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (outer_most.id IS NOT NULL) GROUP BY outer_most.id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (outer_most.id IS NOT NULL) GROUP BY outer_most.id
-- subqueries in WHERE clause
INSERT INTO raw_events_second
(user_id)
@@ -371,7 +382,7 @@ WHERE user_id IN (SELECT user_id
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) 2))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) 2))) AND (raw_events_first.user_id IS NOT NULL))
INSERT INTO raw_events_second
(user_id)
SELECT user_id
@@ -380,10 +391,10 @@ WHERE user_id IN (SELECT user_id
FROM raw_events_second
WHERE user_id != 2 AND value_1 = 2000)
ON conflict (user_id, value_1) DO NOTHING;
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.<>) 2) AND (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000)))) AND (user_id IS NOT NULL)) ON CONFLICT(user_id, value_1) DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.<>) 2) AND (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000)))) AND (user_id IS NOT NULL)) ON CONFLICT(user_id, value_1) DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.<>) 2) AND (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000)))) AND (user_id IS NOT NULL)) ON CONFLICT(user_id, value_1) DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.<>) 2) AND (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000)))) AND (user_id IS NOT NULL)) ON CONFLICT(user_id, value_1) DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.<>) 2) AND (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000)))) AND (raw_events_first.user_id IS NOT NULL)) ON CONFLICT(user_id, value_1) DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.<>) 2) AND (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000)))) AND (raw_events_first.user_id IS NOT NULL)) ON CONFLICT(user_id, value_1) DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.<>) 2) AND (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000)))) AND (raw_events_first.user_id IS NOT NULL)) ON CONFLICT(user_id, value_1) DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.<>) 2) AND (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000)))) AND (raw_events_first.user_id IS NOT NULL)) ON CONFLICT(user_id, value_1) DO NOTHING
INSERT INTO raw_events_second
(user_id)
SELECT user_id
@@ -401,10 +412,10 @@ FROM raw_events_first
WHERE user_id IN (SELECT user_id
FROM raw_events_second
WHERE value_1 = 1000 OR value_1 = 2000 OR value_1 = 3000);
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second WHERE ((raw_events_second.value_1 OPERATOR(pg_catalog.=) 1000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 3000)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second WHERE ((raw_events_second.value_1 OPERATOR(pg_catalog.=) 1000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 3000)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second WHERE ((raw_events_second.value_1 OPERATOR(pg_catalog.=) 1000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 3000)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second WHERE ((raw_events_second.value_1 OPERATOR(pg_catalog.=) 1000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 3000)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second WHERE ((raw_events_second.value_1 OPERATOR(pg_catalog.=) 1000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 3000)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second WHERE ((raw_events_second.value_1 OPERATOR(pg_catalog.=) 1000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 3000)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second WHERE ((raw_events_second.value_1 OPERATOR(pg_catalog.=) 1000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 3000)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second WHERE ((raw_events_second.value_1 OPERATOR(pg_catalog.=) 1000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 2000) OR (raw_events_second.value_1 OPERATOR(pg_catalog.=) 3000)))) AND (raw_events_first.user_id IS NOT NULL))
-- lets mix subqueries in FROM clause and subqueries in WHERE
INSERT INTO agg_events
(user_id)
@@ -449,10 +460,10 @@ ON conflict (user_id, value_1_agg)
DO UPDATE
SET agg_time = EXCLUDED.agg_time
WHERE ae.agg_time < EXCLUDED.agg_time;
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300000 raw_events_first WHERE (user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300001 raw_events_first WHERE (user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300002 raw_events_first WHERE (user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300003 raw_events_first WHERE (user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS ae (user_id, value_1_agg, agg_time) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first."time" FROM public.raw_events_first_13300000 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS ae (user_id, value_1_agg, agg_time) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first."time" FROM public.raw_events_first_13300001 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS ae (user_id, value_1_agg, agg_time) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first."time" FROM public.raw_events_first_13300002 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS ae (user_id, value_1_agg, agg_time) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first."time" FROM public.raw_events_first_13300003 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time)
-- upserts with returning
INSERT INTO agg_events AS ae
(
@@ -469,10 +480,10 @@ DO UPDATE
SET agg_time = EXCLUDED.agg_time
WHERE ae.agg_time < EXCLUDED.agg_time
RETURNING user_id, value_1_agg;
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300000 raw_events_first WHERE (user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300001 raw_events_first WHERE (user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300002 raw_events_first WHERE (user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300003 raw_events_first WHERE (user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS ae (user_id, value_1_agg, agg_time) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first."time" FROM public.raw_events_first_13300000 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS ae (user_id, value_1_agg, agg_time) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first."time" FROM public.raw_events_first_13300001 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS ae (user_id, value_1_agg, agg_time) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first."time" FROM public.raw_events_first_13300002 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS ae (user_id, value_1_agg, agg_time) SELECT raw_events_first.user_id, raw_events_first.value_1, raw_events_first."time" FROM public.raw_events_first_13300003 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg
user_id | value_1_agg
---------------------------------------------------------------------
7 |
@@ -483,20 +494,20 @@ SELECT
user_id, sum(value_1 + value_2)
FROM
raw_events_first GROUP BY user_id;
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 OPERATOR(pg_catalog.+) value_2)) AS sum FROM public.raw_events_first_13300000 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 OPERATOR(pg_catalog.+) value_2)) AS sum FROM public.raw_events_first_13300001 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 OPERATOR(pg_catalog.+) value_2)) AS sum FROM public.raw_events_first_13300002 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 OPERATOR(pg_catalog.+) value_2)) AS sum FROM public.raw_events_first_13300003 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum((raw_events_first.value_1 OPERATOR(pg_catalog.+) raw_events_first.value_2)) AS sum FROM public.raw_events_first_13300000 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum((raw_events_first.value_1 OPERATOR(pg_catalog.+) raw_events_first.value_2)) AS sum FROM public.raw_events_first_13300001 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum((raw_events_first.value_1 OPERATOR(pg_catalog.+) raw_events_first.value_2)) AS sum FROM public.raw_events_first_13300002 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum((raw_events_first.value_1 OPERATOR(pg_catalog.+) raw_events_first.value_2)) AS sum FROM public.raw_events_first_13300003 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
-- FILTER CLAUSE
INSERT INTO agg_events (user_id, value_1_agg)
SELECT
user_id, sum(value_1 + value_2) FILTER (where value_3 = 15)
FROM
raw_events_first GROUP BY user_id;
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 OPERATOR(pg_catalog.+) value_2)) FILTER (WHERE (value_3 OPERATOR(pg_catalog.=) (15)::double precision)) AS sum FROM public.raw_events_first_13300000 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 OPERATOR(pg_catalog.+) value_2)) FILTER (WHERE (value_3 OPERATOR(pg_catalog.=) (15)::double precision)) AS sum FROM public.raw_events_first_13300001 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 OPERATOR(pg_catalog.+) value_2)) FILTER (WHERE (value_3 OPERATOR(pg_catalog.=) (15)::double precision)) AS sum FROM public.raw_events_first_13300002 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 OPERATOR(pg_catalog.+) value_2)) FILTER (WHERE (value_3 OPERATOR(pg_catalog.=) (15)::double precision)) AS sum FROM public.raw_events_first_13300003 raw_events_first WHERE (user_id IS NOT NULL) GROUP BY user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum((raw_events_first.value_1 OPERATOR(pg_catalog.+) raw_events_first.value_2)) FILTER (WHERE (raw_events_first.value_3 OPERATOR(pg_catalog.=) (15)::double precision)) AS sum FROM public.raw_events_first_13300000 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum((raw_events_first.value_1 OPERATOR(pg_catalog.+) raw_events_first.value_2)) FILTER (WHERE (raw_events_first.value_3 OPERATOR(pg_catalog.=) (15)::double precision)) AS sum FROM public.raw_events_first_13300001 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum((raw_events_first.value_1 OPERATOR(pg_catalog.+) raw_events_first.value_2)) FILTER (WHERE (raw_events_first.value_3 OPERATOR(pg_catalog.=) (15)::double precision)) AS sum FROM public.raw_events_first_13300002 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum((raw_events_first.value_1 OPERATOR(pg_catalog.+) raw_events_first.value_2)) FILTER (WHERE (raw_events_first.value_3 OPERATOR(pg_catalog.=) (15)::double precision)) AS sum FROM public.raw_events_first_13300003 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL) GROUP BY raw_events_first.user_id
-- a test with reference table JOINs
INSERT INTO
agg_events (user_id, value_1_agg)
@@ -591,10 +602,10 @@ INSERT INTO agg_events (value_1_agg, user_id)
DISTINCT value_1, user_id
FROM
raw_events_first;
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT user_id, value_1 FROM public.raw_events_first_13300000 raw_events_first WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT user_id, value_1 FROM public.raw_events_first_13300001 raw_events_first WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT user_id, value_1 FROM public.raw_events_first_13300002 raw_events_first WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT user_id, value_1 FROM public.raw_events_first_13300003 raw_events_first WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT raw_events_first.user_id, raw_events_first.value_1 FROM public.raw_events_first_13300000 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT raw_events_first.user_id, raw_events_first.value_1 FROM public.raw_events_first_13300001 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT raw_events_first.user_id, raw_events_first.value_1 FROM public.raw_events_first_13300002 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT raw_events_first.user_id, raw_events_first.value_1 FROM public.raw_events_first_13300003 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
-- we don't want to see constraint violations, so truncate first
SET client_min_messages TO INFO;
truncate agg_events;
@@ -636,10 +647,10 @@ INSERT INTO agg_events (value_1_agg, user_id)
DISTINCT ON (user_id) value_1, user_id
FROM
raw_events_first;
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT ON (user_id) user_id, value_1 FROM public.raw_events_first_13300000 raw_events_first WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT ON (user_id) user_id, value_1 FROM public.raw_events_first_13300001 raw_events_first WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT ON (user_id) user_id, value_1 FROM public.raw_events_first_13300002 raw_events_first WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT ON (user_id) user_id, value_1 FROM public.raw_events_first_13300003 raw_events_first WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT ON (raw_events_first.user_id) raw_events_first.user_id, raw_events_first.value_1 FROM public.raw_events_first_13300000 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT ON (raw_events_first.user_id) raw_events_first.user_id, raw_events_first.value_1 FROM public.raw_events_first_13300001 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT ON (raw_events_first.user_id) raw_events_first.user_id, raw_events_first.value_1 FROM public.raw_events_first_13300002 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT ON (raw_events_first.user_id) raw_events_first.user_id, raw_events_first.value_1 FROM public.raw_events_first_13300003 raw_events_first WHERE (raw_events_first.user_id IS NOT NULL)
SELECT user_id, value_1_agg FROM agg_events ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
user_id | value_1_agg
@@ -685,10 +696,10 @@ DEBUG: Subqueries without relations are not allowed in distributed INSERT ... S
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: performing repartitioned INSERT ... SELECT
DEBUG: partitioning SELECT query by column index 0 with name 'user_id'
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, value_1_agg FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1_agg integer)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, value_1_agg FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300001_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1_agg integer)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, value_1_agg FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300002_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1_agg integer)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, value_1_agg FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1_agg integer)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT intermediate_result.user_id, intermediate_result.value_1_agg FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1_agg integer)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT intermediate_result.user_id, intermediate_result.value_1_agg FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300001_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1_agg integer)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT intermediate_result.user_id, intermediate_result.value_1_agg FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300002_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1_agg integer)
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT intermediate_result.user_id, intermediate_result.value_1_agg FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1_agg integer)
-- We support set operations via the coordinator
BEGIN;
INSERT INTO
@@ -702,10 +713,10 @@ DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: performing repartitioned INSERT ... SELECT
DEBUG: partitioning SELECT query by column index 0 with name 'user_id'
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300004_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300005_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300006_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300007_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300004_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300005_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300006_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300007_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
ROLLBACK;
-- We do support set operations through recursive planning
BEGIN;
@@ -1174,10 +1185,10 @@ HINT: Ensure the target table's partition column has a corresponding simple col
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: performing repartitioned INSERT ... SELECT
DEBUG: partitioning SELECT query by column index 0 with name 'user_id'
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_0,repartitioned_results_xxxxx_from_13300001_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_1,repartitioned_results_xxxxx_from_13300001_to_1,repartitioned_results_xxxxx_from_13300003_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300001_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_3,repartitioned_results_xxxxx_from_13300002_to_3,repartitioned_results_xxxxx_from_13300003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_0,repartitioned_results_xxxxx_from_13300001_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_1,repartitioned_results_xxxxx_from_13300001_to_1,repartitioned_results_xxxxx_from_13300003_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300001_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_3,repartitioned_results_xxxxx_from_13300002_to_3,repartitioned_results_xxxxx_from_13300003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
INSERT INTO raw_events_second
(user_id)
SELECT user_id :: bigint
@@ -1188,10 +1199,10 @@ HINT: Ensure the target table's partition column has a corresponding simple col
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: performing repartitioned INSERT ... SELECT
DEBUG: partitioning SELECT query by column index 0 with name 'user_id'
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300001_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300002_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300000_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300001_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300002_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT intermediate_result.user_id FROM read_intermediate_results('{repartitioned_results_xxxxx_from_13300003_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(user_id integer)
INSERT INTO agg_events
(value_3_agg,
value_4_agg,
@@ -1637,7 +1648,7 @@ FROM raw_events_first
WHERE user_id IN (SELECT raw_events_second.user_id
FROM raw_events_second, raw_events_first
WHERE raw_events_second.user_id = raw_events_first.user_id AND raw_events_first.user_id = 200);
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second, public.raw_events_first_13300000 raw_events_first_1 WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first_1.user_id) AND (raw_events_first_1.user_id OPERATOR(pg_catalog.=) 200)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second, public.raw_events_first_13300000 raw_events_first_1 WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first_1.user_id) AND (raw_events_first_1.user_id OPERATOR(pg_catalog.=) 200)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300007 since SELECT query for it pruned away
@@ -1673,10 +1684,10 @@ FROM raw_events_first
WHERE EXISTS (SELECT 1
FROM raw_events_second
WHERE raw_events_second.user_id =raw_events_first.user_id);
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300004 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300005 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300006 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300004 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300005 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300006 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id))) AND (raw_events_first.user_id IS NOT NULL))
-- we cannot push down
INSERT INTO raw_events_second
(user_id)
@@ -1685,10 +1696,10 @@ FROM raw_events_first
WHERE NOT EXISTS (SELECT 1
FROM raw_events_second
WHERE raw_events_second.user_id =raw_events_first.user_id);
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300004 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300005 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300006 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300004 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300005 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300006 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first.user_id)))) AND (raw_events_first.user_id IS NOT NULL))
-- more complex LEFT JOINs
INSERT INTO agg_events
(user_id, value_4_agg)
@@ -1718,10 +1729,10 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS
ON (f.id = f2.id)) as outer_most
GROUP BY
outer_most.id;
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300000 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (id IS NOT NULL) GROUP BY id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300001 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (id IS NOT NULL) GROUP BY id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300002 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (id IS NOT NULL) GROUP BY id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300003 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (id IS NOT NULL) GROUP BY id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300000 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (outer_most.id IS NOT NULL) GROUP BY outer_most.id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300001 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (outer_most.id IS NOT NULL) GROUP BY outer_most.id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300002 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (outer_most.id IS NOT NULL) GROUP BY outer_most.id
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300003 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))) outer_most WHERE (outer_most.id IS NOT NULL) GROUP BY outer_most.id
RESET client_min_messages;
-- cannot push down since the f.id IN is matched with value_1
-- we use repartition insert/select instead
@@ -1795,10 +1806,10 @@ FROM (SELECT SUM(raw_events_second.value_4) AS v4,
ON (f.id = f2.id)
WHERE f.id IN (SELECT user_id
FROM raw_events_second));
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300000 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first_1, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) WHERE (f.id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300001 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first_1, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) WHERE (f.id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300002 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first_1, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) WHERE (f.id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300003 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first_1, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) WHERE (f.id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second)))) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300000 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first_1, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) WHERE (f.id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300001 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first_1, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) WHERE (f.id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300002 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first_1, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) WHERE (f.id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300003 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first_1, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first_1.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id))) WHERE (f.id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second)))) AND (raw_events_first.user_id IS NOT NULL))
RESET client_min_messages;
-- cannot push down since top level user_id is matched with NOT IN
INSERT INTO raw_events_second
@@ -2004,16 +2015,16 @@ truncate raw_events_first;
SET client_min_messages TO DEBUG2;
-- first show that the query works now
INSERT INTO raw_events_first SELECT * FROM raw_events_second;
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300005 raw_events_second WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300006 raw_events_second WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300007 raw_events_second WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE (raw_events_second.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300005 raw_events_second WHERE (raw_events_second.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300006 raw_events_second WHERE (raw_events_second.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id IS NOT NULL)
SET client_min_messages TO INFO;
truncate raw_events_first;
SET client_min_messages TO DEBUG2;
-- now show that it works for a single shard query as well
INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5;
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE ((user_id OPERATOR(pg_catalog.=) 5) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) 5) AND (raw_events_second.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300001 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300002 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300003 since SELECT query for it pruned away
@@ -2034,7 +2045,7 @@ DETAIL: Insert query cannot be executed on all placements for shard xxxxx
INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 6;
DEBUG: Skipping target shard interval 13300000 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300001 since SELECT query for it pruned away
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300006 raw_events_second WHERE ((user_id OPERATOR(pg_catalog.=) 6) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300006 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) 6) AND (raw_events_second.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300003 since SELECT query for it pruned away
SET client_min_messages TO INFO;
-- mark the unhealthy placement as healthy again for the next tests
@@ -2045,16 +2056,16 @@ truncate raw_events_first;
SET client_min_messages TO DEBUG2;
-- this should work
INSERT INTO raw_events_first SELECT * FROM raw_events_second;
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300005 raw_events_second WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300006 raw_events_second WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300007 raw_events_second WHERE (user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE (raw_events_second.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300005 raw_events_second WHERE (raw_events_second.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300006 raw_events_second WHERE (raw_events_second.user_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id IS NOT NULL)
SET client_min_messages TO INFO;
truncate raw_events_first;
SET client_min_messages TO DEBUG2;
-- this should also work
INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5;
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE ((user_id OPERATOR(pg_catalog.=) 5) AND (user_id IS NOT NULL))
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) 5) AND (raw_events_second.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300001 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300002 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300003 since SELECT query for it pruned away
@@ -2108,64 +2119,64 @@ SELECT create_distributed_table('table_with_defaults', 'store_id');
SET client_min_messages TO DEBUG2;
-- a very simple query
INSERT INTO table_with_defaults SELECT * FROM table_with_defaults;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, first_name, default_1, last_name, default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, first_name, default_1, last_name, default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, table_with_defaults.default_1, table_with_defaults.last_name, table_with_defaults.default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, table_with_defaults.default_1, table_with_defaults.last_name, table_with_defaults.default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
-- see that defaults are filled
INSERT INTO table_with_defaults (store_id, first_name)
SELECT
store_id, first_name
FROM
table_with_defaults;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, 1 AS default_1, '2'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, 1 AS default_1, '2'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, 1 AS default_1, '2'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, 1 AS default_1, '2'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
-- shuffle one of the defaults and skip the other
INSERT INTO table_with_defaults (default_2, store_id, first_name)
SELECT
default_2, store_id, first_name
FROM
table_with_defaults;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, 1 AS default_1, default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, 1 AS default_1, default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, 1 AS default_1, table_with_defaults.default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, 1 AS default_1, table_with_defaults.default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
-- shuffle both defaults
INSERT INTO table_with_defaults (default_2, store_id, default_1, first_name)
SELECT
default_2, store_id, default_1, first_name
FROM
table_with_defaults;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, default_1, default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, default_1, default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, table_with_defaults.default_1, table_with_defaults.default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, table_with_defaults.default_1, table_with_defaults.default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
-- use constants instead of non-default column
INSERT INTO table_with_defaults (default_2, last_name, store_id, first_name)
SELECT
default_2, 'Freund', store_id, 'Andres'
FROM
table_with_defaults;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, table_with_defaults.default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, table_with_defaults.default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
-- use constants instead of non-default column and skip both defaults
INSERT INTO table_with_defaults (last_name, store_id, first_name)
SELECT
'Freund', store_id, 'Andres'
FROM
table_with_defaults;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, '2'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, '2'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, '2'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, '2'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
-- use constants instead of default columns
INSERT INTO table_with_defaults (default_2, last_name, store_id, first_name, default_1)
SELECT
20, last_name, store_id, first_name, 10
FROM
table_with_defaults;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, first_name, 10, last_name, 20 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, first_name, 10, last_name, 20 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, 10, table_with_defaults.last_name, 20 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, table_with_defaults.first_name, 10, table_with_defaults.last_name, 20 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
-- use constants instead of both default columns and non-default columns
INSERT INTO table_with_defaults (default_2, last_name, store_id, first_name, default_1)
SELECT
20, 'Freund', store_id, 'Andres', 10
FROM
table_with_defaults;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 10, 'Freund'::text AS last_name, 20 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 10, 'Freund'::text AS last_name, 20 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 10, 'Freund'::text AS last_name, 20 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 10, 'Freund'::text AS last_name, 20 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL)
-- some of the ultimate queries where we have constants,
-- defaults and group by entry is not on the target entry
INSERT INTO table_with_defaults (default_2, store_id, first_name)
@@ -2175,8 +2186,8 @@ FROM
table_with_defaults
GROUP BY
last_name, store_id;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, '2000'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL) GROUP BY last_name, store_id
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, '2000'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL) GROUP BY last_name, store_id
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1 AS default_1, '2000'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL) GROUP BY table_with_defaults.last_name, table_with_defaults.store_id
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1 AS default_1, '2000'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL) GROUP BY table_with_defaults.last_name, table_with_defaults.store_id
INSERT INTO table_with_defaults (default_1, store_id, first_name, default_2)
SELECT
1000, store_id, 'Andres', '2000'
@@ -2184,8 +2195,8 @@ FROM
table_with_defaults
GROUP BY
last_name, store_id, first_name;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL) GROUP BY last_name, store_id, first_name
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL) GROUP BY last_name, store_id, first_name
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL) GROUP BY table_with_defaults.last_name, table_with_defaults.store_id, table_with_defaults.first_name
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL) GROUP BY table_with_defaults.last_name, table_with_defaults.store_id, table_with_defaults.first_name
INSERT INTO table_with_defaults (default_1, store_id, first_name, default_2)
SELECT
1000, store_id, 'Andres', '2000'
@@ -2193,8 +2204,8 @@ FROM
table_with_defaults
GROUP BY
last_name, store_id, first_name, default_2;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL) GROUP BY last_name, store_id, first_name, default_2
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL) GROUP BY last_name, store_id, first_name, default_2
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL) GROUP BY table_with_defaults.last_name, table_with_defaults.store_id, table_with_defaults.first_name, table_with_defaults.default_2
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL) GROUP BY table_with_defaults.last_name, table_with_defaults.store_id, table_with_defaults.first_name, table_with_defaults.default_2
INSERT INTO table_with_defaults (default_1, store_id, first_name)
SELECT
1000, store_id, 'Andres'
@@ -2202,8 +2213,8 @@ FROM
table_with_defaults
GROUP BY
last_name, store_id, first_name, default_2;
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (store_id IS NOT NULL) GROUP BY last_name, store_id, first_name, default_2
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (store_id IS NOT NULL) GROUP BY last_name, store_id, first_name, default_2
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1000, '2'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL) GROUP BY table_with_defaults.last_name, table_with_defaults.store_id, table_with_defaults.first_name, table_with_defaults.default_2
DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT table_with_defaults.store_id, 'Andres'::text AS first_name, 1000, '2'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE (table_with_defaults.store_id IS NOT NULL) GROUP BY table_with_defaults.last_name, table_with_defaults.store_id, table_with_defaults.first_name, table_with_defaults.default_2
RESET client_min_messages;
-- Stable function in default should be allowed
ALTER TABLE table_with_defaults ADD COLUMN t timestamptz DEFAULT now();
@@ -2423,20 +2434,20 @@ SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s
ON CONFLICT DO NOTHING;
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300000'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300001'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300002'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300003'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300000'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300001'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300002'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300003'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
-- RETURNING is supported
INSERT INTO raw_events_first (user_id, value_1)
SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s
RETURNING *;
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300000'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300001'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300002'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300003'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300000'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300001'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300002'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300003'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
user_id | time | value_1 | value_2 | value_3 | value_4
---------------------------------------------------------------------
1 | | 11 | | |
File diff suppressed because it is too large
@@ -1,3 +1,17 @@
--
-- MULTI_INSERT_SELECT_CONFLICT
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
server_version_ge_15
---------------------------------------------------------------------
t
(1 row)

CREATE SCHEMA on_conflict;
SET search_path TO on_conflict, public;
SET citus.next_shard_id TO 1900000;
@@ -66,7 +80,7 @@ WITH inserted_table AS (
source_table_1
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *
) SELECT * FROM inserted_table ORDER BY 1;
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_2, col_3 FROM on_conflict.source_table_1 ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
@@ -112,7 +126,7 @@ WITH inserted_table AS (
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *
) SELECT * FROM inserted_table ORDER BY 1;
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT foo.col_1, foo.col_2 FROM (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
DEBUG: push down of limit count: 5
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
@@ -148,7 +162,7 @@ WITH inserted_table AS (
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING *
) SELECT * FROM inserted_table ORDER BY 1;
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM ((SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) UNION (SELECT source_table_2.col_1, source_table_2.col_2, source_table_2.col_3 FROM on_conflict.source_table_2 LIMIT 5)) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING target_table.col_1, target_table.col_2
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT foo.col_1, foo.col_2 FROM ((SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) UNION (SELECT source_table_2.col_1, source_table_2.col_2, source_table_2.col_3 FROM on_conflict.source_table_2 LIMIT 5)) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING target_table.col_1, target_table.col_2
DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries
DEBUG: push down of limit count: 5
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
@@ -239,7 +253,7 @@ WITH inserted_table AS MATERIALIZED (
)
INSERT INTO target_table SELECT * FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1 RETURNING *
) SELECT * FROM inserted_table ORDER BY 1;
DEBUG: generating subplan XXX_1 for CTE inserted_table: WITH cte AS MATERIALIZED (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1), cte_2 AS MATERIALIZED (SELECT cte.col_1, cte.col_2 FROM cte) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2
DEBUG: generating subplan XXX_1 for CTE inserted_table: WITH cte AS MATERIALIZED (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1), cte_2 AS MATERIALIZED (SELECT cte.col_1, cte.col_2 FROM cte) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT cte_2.col_1, cte_2.col_2 FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte
@@ -262,7 +276,7 @@ WITH cte AS MATERIALIZED (
INSERT INTO target_table (SELECT * FROM basic) ON CONFLICT DO NOTHING RETURNING *
)
UPDATE target_table SET col_2 = 4 WHERE col_1 IN (SELECT col_1 FROM cte);
DEBUG: generating subplan XXX_1 for CTE cte: WITH basic AS MATERIALIZED (SELECT source_table_1.col_1, source_table_1.col_2 FROM on_conflict.source_table_1) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM basic ON CONFLICT DO NOTHING RETURNING target_table.col_1, target_table.col_2
DEBUG: generating subplan XXX_1 for CTE cte: WITH basic AS MATERIALIZED (SELECT source_table_1.col_1, source_table_1.col_2 FROM on_conflict.source_table_1) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT basic.col_1, basic.col_2 FROM basic ON CONFLICT DO NOTHING RETURNING target_table.col_1, target_table.col_2
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: generating subplan XXX_1 for CTE basic: SELECT col_1, col_2 FROM on_conflict.source_table_1
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT basic.col_1, basic.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) basic) citus_insert_select_subquery
@@ -557,7 +571,7 @@ SELECT DISTINCT col_2 FROM target_table;
WITH cte_1 AS (INSERT INTO target_table SELECT * FROM target_table LIMIT 10000 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1 RETURNING *)
SELECT DISTINCT col_2 FROM cte_1;
DEBUG: generating subplan XXX_1 for CTE cte_1: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM on_conflict.target_table LIMIT 10000 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2
DEBUG: generating subplan XXX_1 for CTE cte_1: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT target_table_1.col_1, target_table_1.col_2 FROM on_conflict.target_table target_table_1 LIMIT 10000 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
DEBUG: push down of limit count: 10000
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_1
@@ -0,0 +1,593 @@
--
|
||||
-- MULTI_INSERT_SELECT_CONFLICT
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA on_conflict;
|
||||
SET search_path TO on_conflict, public;
|
||||
SET citus.next_shard_id TO 1900000;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE target_table(col_1 int primary key, col_2 int);
|
||||
SELECT create_distributed_table('target_table','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6);
|
||||
CREATE TABLE source_table_1(col_1 int primary key, col_2 int, col_3 int);
|
||||
SELECT create_distributed_table('source_table_1','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5);
|
||||
CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int);
|
||||
SELECT create_distributed_table('source_table_2','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_2 VALUES(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
|
||||
SET client_min_messages to debug1;
|
||||
-- Generate series directly on the coordinator and on conflict do nothing
|
||||
INSERT INTO target_table (col_1, col_2)
|
||||
SELECT
|
||||
s, s
|
||||
FROM
|
||||
generate_series(1,10) s
|
||||
ON CONFLICT DO NOTHING;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Generate series directly on the coordinator and on conflict update the target table
|
||||
INSERT INTO target_table (col_1, col_2)
|
||||
SELECT s, s
|
||||
FROM
|
||||
generate_series(1,10) s
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Since partition columns do not match, pull the data to the coordinator
|
||||
-- and do not change conflicted values
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
ON CONFLICT DO NOTHING;
|
||||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
-- Since partition columns do not match, pull the data to the coordinator
|
||||
-- and update the non-partition column. Query is wrapped by CTE to return
|
||||
-- ordered result.
|
||||
WITH inserted_table AS (
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *
|
||||
) SELECT * FROM inserted_table ORDER BY 1;
|
||||
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_2, col_3 FROM on_conflict.source_table_1 ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
(5 rows)
|
||||
|
||||
-- Subquery should be recursively planned due to the limit and do nothing on conflict
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT DO NOTHING;
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Subquery should be recursively planned due to the limit and update on conflict
|
||||
-- Query is wrapped by CTE to return ordered result.
|
||||
WITH inserted_table AS (
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *
|
||||
) SELECT * FROM inserted_table ORDER BY 1;
|
||||
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
(5 rows)
|
||||
|
||||
-- Test with multiple subqueries. Query is wrapped by CTE to return ordered result.
|
||||
WITH inserted_table AS (
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
(SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
LIMIT 5)
|
||||
UNION
|
||||
(SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_2
|
||||
LIMIT 5)
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING *
|
||||
) SELECT * FROM inserted_table ORDER BY 1;
|
||||
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM ((SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) UNION (SELECT source_table_2.col_1, source_table_2.col_2, source_table_2.col_3 FROM on_conflict.source_table_2 LIMIT 5)) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_2 LIMIT 5
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 0
|
||||
2 | 0
|
||||
3 | 0
|
||||
4 | 0
|
||||
5 | 0
|
||||
6 | 0
|
||||
7 | 0
|
||||
8 | 0
|
||||
9 | 0
|
||||
10 | 0
|
||||
(10 rows)
|
||||
|
||||
-- Get the select part from cte and do nothing on conflict
|
||||
WITH cte AS MATERIALIZED (
|
||||
SELECT col_1, col_2 FROM source_table_1
|
||||
)
|
||||
INSERT INTO target_table SELECT * FROM cte ON CONFLICT DO NOTHING;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Get the select part from cte and update on conflict
|
||||
WITH cte AS MATERIALIZED (
|
||||
SELECT col_1, col_2 FROM source_table_1
|
||||
)
|
||||
INSERT INTO target_table SELECT * FROM cte ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 3
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
6 | 0
|
||||
7 | 0
|
||||
8 | 0
|
||||
9 | 0
|
||||
10 | 0
|
||||
(10 rows)
|
||||
|
||||
-- Test with multiple CTEs
|
||||
WITH cte AS(
|
||||
SELECT col_1, col_2 FROM source_table_1
|
||||
), cte_2 AS(
|
||||
SELECT col_1, col_2 FROM source_table_2
|
||||
)
|
||||
INSERT INTO target_table ((SELECT * FROM cte) UNION (SELECT * FROM cte_2)) ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: CTE cte is going to be inlined via distributed planning
|
||||
DEBUG: CTE cte_2 is going to be inlined via distributed planning
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 3
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
6 | 7
|
||||
7 | 8
|
||||
8 | 9
|
||||
9 | 10
|
||||
10 | 11
|
||||
(10 rows)
|
||||
|
||||
WITH inserted_table AS MATERIALIZED (
|
||||
WITH cte AS MATERIALIZED (
|
||||
SELECT col_1, col_2, col_3 FROM source_table_1
|
||||
), cte_2 AS MATERIALIZED (
|
||||
SELECT col_1, col_2 FROM cte
|
||||
)
|
||||
INSERT INTO target_table SELECT * FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1 RETURNING *
|
||||
) SELECT * FROM inserted_table ORDER BY 1;
|
||||
DEBUG: generating subplan XXX_1 for CTE inserted_table: WITH cte AS MATERIALIZED (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1), cte_2 AS MATERIALIZED (SELECT cte.col_1, cte.col_2 FROM cte) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 3
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
(5 rows)
|
||||
|
||||
WITH cte AS MATERIALIZED (
|
||||
WITH basic AS MATERIALIZED (
|
||||
SELECT col_1, col_2 FROM source_table_1
|
||||
)
|
||||
INSERT INTO target_table (SELECT * FROM basic) ON CONFLICT DO NOTHING RETURNING *
|
||||
)
|
||||
UPDATE target_table SET col_2 = 4 WHERE col_1 IN (SELECT col_1 FROM cte);
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: WITH basic AS MATERIALIZED (SELECT source_table_1.col_1, source_table_1.col_2 FROM on_conflict.source_table_1) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM basic ON CONFLICT DO NOTHING RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE basic: SELECT col_1, col_2 FROM on_conflict.source_table_1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT basic.col_1, basic.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) basic) citus_insert_select_subquery
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE on_conflict.target_table SET col_2 = 4 WHERE (col_1 OPERATOR(pg_catalog.=) ANY (SELECT cte.col_1 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte))
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
RESET client_min_messages;
|
||||
-- Following query is supported by using repartition join for the insert/select
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (costs off)
|
||||
WITH cte AS (
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM
|
||||
source_table_1
|
||||
)
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
source_table_1.col_1,
|
||||
source_table_1.col_2
|
||||
FROM cte, source_table_1
|
||||
WHERE cte.col_1 = source_table_1.col_1 ON CONFLICT DO NOTHING;
|
||||
$Q$);
|
||||
coordinator_plan
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus INSERT ... SELECT)
|
||||
INSERT/SELECT method: repartition
|
||||
-> Custom Scan (Citus Adaptive)
|
||||
Task Count: 4
|
||||
(4 rows)
|
||||
|
||||
-- Tests with foreign key to reference table
|
||||
CREATE TABLE test_ref_table (key int PRIMARY KEY);
|
||||
SELECT create_reference_table('test_ref_table');
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_ref_table VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
|
||||
ALTER TABLE target_table ADD CONSTRAINT fkey FOREIGN KEY (col_1) REFERENCES test_ref_table(key) ON DELETE CASCADE;
|
||||
BEGIN;
|
||||
TRUNCATE test_ref_table CASCADE;
|
||||
NOTICE: truncate cascades to table "target_table"
|
||||
INSERT INTO
|
||||
target_table
|
||||
SELECT
|
||||
col_2,
|
||||
col_1
|
||||
FROM source_table_1 ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
|
||||
ERROR: insert or update on table "target_table_xxxxxxx" violates foreign key constraint "fkey_xxxxxxx"
|
||||
DETAIL: Key (col_1)=(X) is not present in table "test_ref_table_xxxxxxx".
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
ROLLBACK;
|
||||
BEGIN;
|
||||
DELETE FROM test_ref_table WHERE key > 10;
|
||||
WITH r AS (
|
||||
INSERT INTO
|
||||
target_table
|
||||
SELECT
|
||||
col_2,
|
||||
col_1
|
||||
FROM source_table_1 ON CONFLICT (col_1) DO UPDATE SET col_2 = 1 RETURNING *)
|
||||
SELECT * FROM r ORDER BY col_1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 1
|
||||
3 | 1
|
||||
4 | 1
|
||||
5 | 1
|
||||
(5 rows)
|
||||
|
||||
ROLLBACK;
|
||||
-- Following two queries are supported since we do not modify but only select from
|
||||
-- the target_table after modification on test_ref_table.
|
||||
BEGIN;
|
||||
TRUNCATE test_ref_table CASCADE;
|
||||
NOTICE: truncate cascades to table "target_table"
|
||||
INSERT INTO
|
||||
source_table_1
|
||||
SELECT
|
||||
col_2,
|
||||
col_1
|
||||
FROM target_table ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
|
||||
col_1 | col_2 | col_3
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
ROLLBACK;
|
||||
BEGIN;
|
||||
DELETE FROM test_ref_table;
|
||||
INSERT INTO
|
||||
source_table_1
|
||||
SELECT
|
||||
col_2,
|
||||
col_1
|
||||
FROM target_table ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
|
||||
col_1 | col_2 | col_3
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
ROLLBACK;
|
||||
-- INSERT .. SELECT with different column types
|
||||
CREATE TABLE source_table_3(col_1 numeric, col_2 numeric, col_3 numeric);
|
||||
SELECT create_distributed_table('source_table_3','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_3 VALUES(1,11,1),(2,22,2),(3,33,3),(4,44,4),(5,55,5);
|
||||
CREATE TABLE source_table_4(id int, arr_val text[]);
|
||||
SELECT create_distributed_table('source_table_4','id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_4 VALUES(1, '{"abc","cde","efg"}'), (2, '{"xyz","tvu"}');
|
||||
CREATE TABLE target_table_2(id int primary key, arr_val char(10)[]);
|
||||
SELECT create_distributed_table('target_table_2','id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO target_table_2 VALUES(1, '{"abc","def","gyx"}');
|
||||
SET client_min_messages to debug1;
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM
|
||||
source_table_3
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2;
|
||||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: The data type of the target table's partition column should exactly match the data type of the corresponding simple column reference in the subquery.
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 11
|
||||
2 | 22
|
||||
3 | 33
|
||||
4 | 44
|
||||
5 | 55
|
||||
6 | 7
|
||||
7 | 8
|
||||
8 | 9
|
||||
9 | 10
|
||||
10 | 11
|
||||
(10 rows)
|
||||
|
||||
INSERT INTO target_table_2
|
||||
SELECT
|
||||
*
|
||||
FROM
|
||||
source_table_4
|
||||
ON CONFLICT DO NOTHING;
|
||||
SELECT * FROM target_table_2 ORDER BY 1;
|
||||
id | arr_val
|
||||
---------------------------------------------------------------------
|
||||
1 | {"abc ","def ","gyx "}
|
||||
2 | {"xyz ","tvu "}
|
||||
(2 rows)
|
||||
|
||||
RESET client_min_messages;
|
||||
-- Test with shard_replication_factor = 2
|
||||
SET citus.shard_replication_factor to 2;
|
||||
DROP TABLE target_table, source_table_1, source_table_2;
|
||||
CREATE TABLE target_table(col_1 int primary key, col_2 int);
|
||||
SELECT create_distributed_table('target_table','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6);
|
||||
CREATE TABLE source_table_1(col_1 int, col_2 int, col_3 int);
|
||||
SELECT create_distributed_table('source_table_1','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5);
|
||||
CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int);
|
||||
SELECT create_distributed_table('source_table_2','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_2 VALUES(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
|
||||
SET client_min_messages to debug1;
|
||||
-- Generate series directly on the coordinator and on conflict do nothing
|
||||
INSERT INTO target_table (col_1, col_2)
|
||||
SELECT
|
||||
s, s
|
||||
FROM
|
||||
generate_series(1,10) s
|
||||
ON CONFLICT DO NOTHING;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Test with multiple subqueries
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
(SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
LIMIT 5)
|
||||
UNION
|
||||
(SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_2
|
||||
LIMIT 5)
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = 0;
|
||||
DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_2 LIMIT 5
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 0
|
||||
2 | 0
|
||||
3 | 0
|
||||
4 | 0
|
||||
5 | 0
|
||||
6 | 0
|
||||
7 | 0
|
||||
8 | 0
|
||||
9 | 0
|
||||
10 | 0
|
||||
(10 rows)
|
||||
|
||||
WITH cte AS MATERIALIZED(
|
||||
SELECT col_1, col_2, col_3 FROM source_table_1
|
||||
), cte_2 AS MATERIALIZED(
|
||||
SELECT col_1, col_2 FROM cte
|
||||
)
|
||||
INSERT INTO target_table SELECT * FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 3
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
6 | 0
|
||||
7 | 0
|
||||
8 | 0
|
||||
9 | 0
|
||||
10 | 0
|
||||
(10 rows)
|
||||
|
||||
-- make sure that even if COPY switchover happens
|
||||
-- the results are correct
|
||||
SET citus.copy_switchover_threshold TO 1;
|
||||
TRUNCATE target_table;
|
||||
-- load some data to make sure copy commands switch over connections
|
||||
INSERT INTO target_table SELECT i,0 FROM generate_series(0,500)i;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- make sure that SELECT only uses 1 connection 1 node
|
||||
-- yet still COPY commands use 1 connection per co-located
|
||||
-- intermediate result file
|
||||
SET citus.max_adaptive_executor_pool_size TO 1;
|
||||
INSERT INTO target_table SELECT * FROM target_table LIMIT 10000 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 10000
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
SELECT DISTINCT col_2 FROM target_table;
|
||||
col_2
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
WITH cte_1 AS (INSERT INTO target_table SELECT * FROM target_table LIMIT 10000 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1 RETURNING *)
|
||||
SELECT DISTINCT col_2 FROM cte_1;
|
||||
DEBUG: generating subplan XXX_1 for CTE cte_1: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM on_conflict.target_table LIMIT 10000 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 10000
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_1
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
col_2
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
RESET client_min_messages;
|
||||
DROP SCHEMA on_conflict CASCADE;
|
||||
NOTICE: drop cascades to 7 other objects
|
||||
DETAIL: drop cascades to table test_ref_table
|
||||
drop cascades to table source_table_3
|
||||
drop cascades to table source_table_4
|
||||
drop cascades to table target_table_2
|
||||
drop cascades to table target_table
|
||||
drop cascades to table source_table_1
|
||||
drop cascades to table source_table_2
@@ -1,6 +1,16 @@
--
|
||||
-- MULTI_METADATA_SYNC
|
||||
--
|
||||
-- this test has different output for PG13/14 compared to PG15
|
||||
-- In PG15, public schema is owned by pg_database_owner role
|
||||
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- Tests for metadata snapshot functions, metadata syncing functions and propagation of
|
||||
-- metadata changes to MX tables.
|
||||
-- Turn metadata sync off at first
|
||||
|
@@ -63,7 +73,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
unnest
|
||||
---------------------------------------------------------------------
|
||||
ALTER DATABASE regression OWNER TO postgres;
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_dist_node
|
||||
|
@@ -71,9 +81,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO postgres;
|
||||
GRANT CREATE ON SCHEMA public TO pg_database_owner;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO postgres;
|
||||
GRANT USAGE ON SCHEMA public TO pg_database_owner;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
|
@@ -81,8 +91,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SET ROLE postgres
|
||||
SET ROLE postgres
|
||||
SET ROLE pg_database_owner
|
||||
SET ROLE pg_database_owner
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
|
@@ -131,7 +141,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
|
||||
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
|
||||
ALTER TABLE public.mx_test_table OWNER TO postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass))
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
|
@@ -140,9 +150,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO postgres;
|
||||
GRANT CREATE ON SCHEMA public TO pg_database_owner;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO postgres;
|
||||
GRANT USAGE ON SCHEMA public TO pg_database_owner;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
|
@@ -155,8 +165,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_create_truncate_trigger('public.mx_test_table')
|
||||
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SET ROLE postgres
|
||||
SET ROLE postgres
|
||||
SET ROLE pg_database_owner
|
||||
SET ROLE pg_database_owner
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
|
@@ -181,7 +191,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
|
||||
ALTER TABLE public.mx_test_table OWNER TO postgres
|
||||
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass))
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
|
@@ -190,9 +200,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO postgres;
|
||||
GRANT CREATE ON SCHEMA public TO pg_database_owner;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO postgres;
|
||||
GRANT USAGE ON SCHEMA public TO pg_database_owner;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
|
@@ -205,8 +215,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_create_truncate_trigger('public.mx_test_table')
|
||||
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SET ROLE postgres
|
||||
SET ROLE postgres
|
||||
SET ROLE pg_database_owner
|
||||
SET ROLE pg_database_owner
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
|
@@ -233,7 +243,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
|
||||
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
|
||||
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass))
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
|
@@ -242,9 +252,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO postgres;
|
||||
GRANT CREATE ON SCHEMA public TO pg_database_owner;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO postgres;
|
||||
GRANT USAGE ON SCHEMA public TO pg_database_owner;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
|
@@ -257,8 +267,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
|
||||
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SET ROLE postgres
|
||||
SET ROLE postgres
|
||||
SET ROLE pg_database_owner
|
||||
SET ROLE pg_database_owner
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
|
@@ -291,7 +301,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
|
||||
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
|
||||
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass))
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
|
@@ -300,9 +310,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO postgres;
|
||||
GRANT CREATE ON SCHEMA public TO pg_database_owner;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO postgres;
|
||||
GRANT USAGE ON SCHEMA public TO pg_database_owner;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
|
@@ -315,8 +325,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
|
||||
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SET ROLE postgres
|
||||
SET ROLE postgres
|
||||
SET ROLE pg_database_owner
|
||||
SET ROLE pg_database_owner
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
|
@@ -342,7 +352,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
|
||||
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
|
||||
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass))
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
|
@@ -351,9 +361,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO postgres;
|
||||
GRANT CREATE ON SCHEMA public TO pg_database_owner;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO postgres;
|
||||
GRANT USAGE ON SCHEMA public TO pg_database_owner;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
|
@@ -366,8 +376,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
|
||||
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SET ROLE postgres
|
||||
SET ROLE postgres
|
||||
SET ROLE pg_database_owner
|
||||
SET ROLE pg_database_owner
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
|
@@ -1867,7 +1877,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
CREATE SCHEMA IF NOT EXISTS mx_test_schema_2 AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS mx_testing_schema_2 AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
CREATE TABLE mx_test_schema_1.mx_table_1 (col1 integer, col2 text, col3 integer)
|
||||
CREATE TABLE mx_test_schema_2.mx_table_2 (col1 integer, col2 text)
|
||||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass))
|
||||
|
@ -1881,9 +1891,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO postgres;
|
||||
GRANT CREATE ON SCHEMA public TO pg_database_owner;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO postgres;
|
||||
GRANT USAGE ON SCHEMA public TO pg_database_owner;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (4, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(5, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(7, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
|
@ -1908,8 +1918,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
SELECT worker_create_truncate_trigger('public.mx_ref')
|
||||
SELECT worker_create_truncate_trigger('public.test_table')
|
||||
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SET ROLE postgres
|
||||
SET ROLE postgres
|
||||
SET ROLE pg_database_owner
|
||||
SET ROLE pg_database_owner
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,4 +1,19 @@
|
|||
--
|
||||
-- MULTI_MX_INSERT_SELECT_REPARTITION
|
||||
--
|
||||
-- Test behaviour of repartitioned INSERT ... SELECT in MX setup
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
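-- Concretely, the display change only affects how the repartitioned INSERT ... SELECT
-- over intermediate results is deparsed. Simplified from the expected outputs further
-- below (the result-file names are elided here as '{...}'), PG14 prints the target list
-- unqualified:
--   SELECT a FROM read_intermediate_results('{...}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)
-- whereas PG15 qualifies the column with the intermediate_result alias:
--   SELECT intermediate_result.a FROM read_intermediate_results('{...}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer)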
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA multi_mx_insert_select_repartition;
|
||||
SET search_path TO multi_mx_insert_select_repartition;
|
||||
SET citus.next_shard_id TO 4213581;
|
||||
|
@ -92,8 +107,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i
|
|||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes
|
||||
NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a
|
||||
NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a
|
||||
NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a
|
||||
NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a
|
||||
a
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
|
|
|
@ -0,0 +1,166 @@
|
|||
--
|
||||
-- MULTI_MX_INSERT_SELECT_REPARTITION
|
||||
--
|
||||
-- Test behaviour of repartitioned INSERT ... SELECT in MX setup
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA multi_mx_insert_select_repartition;
|
||||
SET search_path TO multi_mx_insert_select_repartition;
|
||||
SET citus.next_shard_id TO 4213581;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_count TO 4;
|
||||
CREATE TABLE source_table(a int, b int);
|
||||
SELECT create_distributed_table('source_table', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table SELECT floor(i/4), i*i FROM generate_series(1, 20) i;
|
||||
SET citus.shard_count TO 3;
|
||||
CREATE TABLE target_table(a int, b int);
|
||||
SELECT create_distributed_table('target_table', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE FUNCTION square(int) RETURNS INT
|
||||
AS $$ SELECT $1 * $1 $$
|
||||
LANGUAGE SQL;
|
||||
select create_distributed_function('square(int)');
|
||||
NOTICE: procedure multi_mx_insert_select_repartition.square is already distributed
|
||||
DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
select public.colocate_proc_with_table('square', 'source_table'::regclass, 0);
|
||||
colocate_proc_with_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Test along with function delegation
|
||||
-- function delegation only happens for "SELECT f()", and we don't use
|
||||
-- repartitioned INSERT/SELECT when task count is 1, so the following
|
||||
-- should go via coordinator
|
||||
EXPLAIN (costs off) INSERT INTO target_table(a) SELECT square(4);
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus INSERT ... SELECT)
|
||||
INSERT/SELECT method: pull to coordinator
|
||||
-> Result
|
||||
(3 rows)
|
||||
|
||||
INSERT INTO target_table(a) SELECT square(4);
|
||||
SELECT * FROM target_table;
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
16 |
|
||||
(1 row)
|
||||
|
||||
TRUNCATE target_table;
|
||||
--
|
||||
-- Test repartitioned INSERT/SELECT from MX worker
|
||||
--
|
||||
\c - - - :worker_1_port
|
||||
SET search_path TO multi_mx_insert_select_repartition;
|
||||
EXPLAIN (costs off) INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus INSERT ... SELECT)
|
||||
INSERT/SELECT method: repartition
|
||||
-> Custom Scan (Citus Adaptive)
|
||||
Task Count: 4
|
||||
Tasks Shown: One of 4
|
||||
-> Task
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> HashAggregate
|
||||
Group Key: a
|
||||
-> Seq Scan on source_table_4213581 source_table
|
||||
(10 rows)
|
||||
|
||||
INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a;
|
||||
SET citus.log_local_commands to on;
|
||||
-- INSERT .. SELECT via repartitioning with local execution
|
||||
BEGIN;
|
||||
select count(*) from source_table WHERE a = 1;
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE (a OPERATOR(pg_catalog.=) 1)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
insert into target_table SELECT a*2 FROM source_table RETURNING a;
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes
|
||||
NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a
|
||||
NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a
|
||||
a
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
0
|
||||
0
|
||||
2
|
||||
2
|
||||
2
|
||||
2
|
||||
4
|
||||
4
|
||||
4
|
||||
4
|
||||
6
|
||||
6
|
||||
6
|
||||
6
|
||||
8
|
||||
8
|
||||
8
|
||||
8
|
||||
10
|
||||
(20 rows)
|
||||
|
||||
ROLLBACK;
|
||||
BEGIN;
|
||||
select count(*) from source_table WHERE a = 1;
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE (a OPERATOR(pg_catalog.=) 1)
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
insert into target_table SELECT a FROM source_table LIMIT 10;
|
||||
NOTICE: executing the command locally: SELECT a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true LIMIT '10'::bigint
|
||||
NOTICE: executing the command locally: SELECT a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true LIMIT '10'::bigint
|
||||
NOTICE: executing the copy locally for shard xxxxx
|
||||
ROLLBACK;
|
||||
\c - - - :master_port
|
||||
SET search_path TO multi_mx_insert_select_repartition;
|
||||
SELECT * FROM target_table ORDER BY a;
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
0 | 9
|
||||
1 | 49
|
||||
2 | 121
|
||||
3 | 225
|
||||
4 | 361
|
||||
5 | 400
|
||||
(6 rows)
|
||||
|
||||
RESET client_min_messages;
|
||||
\set VERBOSITY terse
|
||||
DROP SCHEMA multi_mx_insert_select_repartition CASCADE;
|
||||
NOTICE: drop cascades to 3 other objects
|
|
@ -306,12 +306,14 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggregate is disabled. We expect to see sort+unique
|
||||
-- instead of aggregate plan node to handle distinct.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT count(*)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
plan_without_result_lines
|
||||
---------------------------------------------------------------------
|
||||
Unique
|
||||
-> Sort
|
||||
|
@ -380,13 +382,15 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggregate is disabled. Similar to the explain of
|
||||
-- the query above.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT l_suppkey, count(*)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1
|
||||
LIMIT 10;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
plan_without_result_lines
|
||||
---------------------------------------------------------------------
|
||||
Limit
|
||||
-> Unique
|
||||
|
@ -457,13 +461,15 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggregate is disabled. This explain errors out due
|
||||
-- to a bug right now, expectation must be corrected after fixing it.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT l_suppkey, avg(l_partkey)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1,2
|
||||
LIMIT 10;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
plan_without_result_lines
|
||||
---------------------------------------------------------------------
|
||||
Limit
|
||||
-> Unique
|
||||
|
@ -533,13 +539,15 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggregate is disabled. We expect to see sort+unique to
|
||||
-- handle distinct on.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT ON (l_suppkey) avg(l_partkey)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY l_suppkey,1
|
||||
LIMIT 10;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
plan_without_result_lines
|
||||
---------------------------------------------------------------------
|
||||
Limit
|
||||
-> Unique
|
||||
|
@ -608,13 +616,15 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggregate is disabled. This explain errors out due
|
||||
-- to a bug right now, expectation must be corrected after fixing it.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT avg(ceil(l_partkey / 2))
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1
|
||||
LIMIT 10;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
plan_without_result_lines
|
||||
---------------------------------------------------------------------
|
||||
Limit
|
||||
-> Unique
|
||||
|
@ -683,13 +693,15 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggregate is disabled. This explain errors out due
|
||||
-- to a bug right now, expectation must be corrected after fixing it.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT sum(l_suppkey) + count(l_partkey) AS dis
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1
|
||||
LIMIT 10;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
plan_without_result_lines
|
||||
---------------------------------------------------------------------
|
||||
Limit
|
||||
-> Unique
|
||||
|
@ -733,13 +745,15 @@ SELECT DISTINCT *
|
|||
|
||||
-- explain the query to see actual plan. We expect to see only one aggregation
|
||||
-- node since the group by columns guarantee uniqueness.
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT *
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16
|
||||
ORDER BY 1,2
|
||||
LIMIT 10;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
coordinator_plan
|
||||
---------------------------------------------------------------------
|
||||
Limit
|
||||
-> Sort
|
||||
|
@ -748,28 +762,20 @@ EXPLAIN (COSTS FALSE)
|
|||
Group Key: remote_scan.l_orderkey, remote_scan.l_partkey, remote_scan.l_suppkey, remote_scan.l_linenumber, remote_scan.l_quantity, remote_scan.l_extendedprice, remote_scan.l_discount, remote_scan.l_tax, remote_scan.l_returnflag, remote_scan.l_linestatus, remote_scan.l_shipdate, remote_scan.l_commitdate, remote_scan.l_receiptdate, remote_scan.l_shipinstruct, remote_scan.l_shipmode, remote_scan.l_comment
|
||||
-> Custom Scan (Citus Adaptive)
|
||||
Task Count: 4
|
||||
Tasks Shown: One of 4
|
||||
-> Task
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Limit
|
||||
-> Unique
|
||||
-> Group
|
||||
Group Key: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
|
||||
-> Sort
|
||||
Sort Key: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
|
||||
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
||||
(17 rows)
|
||||
(7 rows)
|
||||
|
||||
-- check the plan if the hash aggregate is disabled. We expect to see only one
|
||||
-- aggregation node since the group by columns guarantee uniqueness.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT *
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16
|
||||
ORDER BY 1,2
|
||||
LIMIT 10;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
coordinator_plan
|
||||
---------------------------------------------------------------------
|
||||
Limit
|
||||
-> Unique
|
||||
|
@ -777,17 +783,7 @@ EXPLAIN (COSTS FALSE)
|
|||
Sort Key: remote_scan.l_orderkey, remote_scan.l_partkey, remote_scan.l_suppkey, remote_scan.l_linenumber, remote_scan.l_quantity, remote_scan.l_extendedprice, remote_scan.l_discount, remote_scan.l_tax, remote_scan.l_returnflag, remote_scan.l_linestatus, remote_scan.l_shipdate, remote_scan.l_commitdate, remote_scan.l_receiptdate, remote_scan.l_shipinstruct, remote_scan.l_shipmode, remote_scan.l_comment
|
||||
-> Custom Scan (Citus Adaptive)
|
||||
Task Count: 4
|
||||
Tasks Shown: One of 4
|
||||
-> Task
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Limit
|
||||
-> Unique
|
||||
-> Group
|
||||
Group Key: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
|
||||
-> Sort
|
||||
Sort Key: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
|
||||
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
||||
(16 rows)
|
||||
(6 rows)
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
-- distinct on count distinct
|
||||
|
@ -914,12 +910,14 @@ EXPLAIN (COSTS FALSE)
|
|||
|
||||
-- check the plan if the hash aggregate is disabled
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT ceil(count(case when l_partkey > 100000 THEN 1 ELSE 0 END) / 2) AS count
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey
|
||||
ORDER BY 1;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
plan_without_result_lines
|
||||
---------------------------------------------------------------------
|
||||
Unique
|
||||
-> Sort
|
||||
|
@ -940,13 +938,15 @@ EXPLAIN (COSTS FALSE)
|
|||
|
||||
SET enable_hashagg TO on;
|
||||
-- explain the query to see actual plan with array_agg aggregation.
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT array_agg(l_linenumber), array_length(array_agg(l_linenumber), 1)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_orderkey
|
||||
ORDER BY 2
|
||||
LIMIT 15;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
coordinator_plan
|
||||
---------------------------------------------------------------------
|
||||
Limit
|
||||
-> Sort
|
||||
|
@ -955,25 +955,19 @@ EXPLAIN (COSTS FALSE)
|
|||
Group Key: remote_scan.array_length, remote_scan.array_agg
|
||||
-> Custom Scan (Citus Adaptive)
|
||||
Task Count: 4
|
||||
Tasks Shown: One of 4
|
||||
-> Task
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> GroupAggregate
|
||||
Group Key: l_orderkey
|
||||
-> Sort
|
||||
Sort Key: l_orderkey
|
||||
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
||||
(15 rows)
|
||||
(7 rows)
|
||||
|
||||
-- check the plan if the hash aggregate is disabled.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT array_agg(l_linenumber), array_length(array_agg(l_linenumber), 1)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_orderkey
|
||||
ORDER BY 2
|
||||
LIMIT 15;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
coordinator_plan
|
||||
---------------------------------------------------------------------
|
||||
Limit
|
||||
-> Unique
|
||||
|
@ -981,15 +975,7 @@ EXPLAIN (COSTS FALSE)
|
|||
Sort Key: remote_scan.array_length, remote_scan.array_agg
|
||||
-> Custom Scan (Citus Adaptive)
|
||||
Task Count: 4
|
||||
Tasks Shown: One of 4
|
||||
-> Task
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> GroupAggregate
|
||||
Group Key: l_orderkey
|
||||
-> Sort
|
||||
Sort Key: l_orderkey
|
||||
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
|
||||
(14 rows)
|
||||
(6 rows)
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
-- distinct on non-partition column with aggregate
|
||||
|
|
|
@ -17,10 +17,15 @@ BEGIN
|
|||
END;
|
||||
$$LANGUAGE plpgsql;
|
||||
-- Create a function to ignore worker plans in explain output
|
||||
-- Also remove extra "-> Result" lines for PG15 support
|
||||
CREATE OR REPLACE FUNCTION coordinator_plan(explain_command text, out query_plan text)
|
||||
RETURNS SETOF TEXT AS $$
|
||||
BEGIN
|
||||
FOR query_plan IN execute explain_command LOOP
|
||||
IF (query_plan LIKE '%-> Result%' OR query_plan = 'Result')
|
||||
THEN
|
||||
CONTINUE;
|
||||
END IF;
|
||||
RETURN next;
|
||||
IF query_plan LIKE '%Task Count:%'
|
||||
THEN
|
||||
|
@ -29,6 +34,65 @@ BEGIN
|
|||
END LOOP;
|
||||
RETURN;
|
||||
END; $$ language plpgsql;
|
||||
-- Create a function to ignore worker plans in explain output
|
||||
-- It also shows task count for plan and subplans
|
||||
-- Also remove extra "-> Result" lines for PG15 support
|
||||
CREATE OR REPLACE FUNCTION coordinator_plan_with_subplans(explain_command text, out query_plan text)
|
||||
RETURNS SETOF TEXT AS $$
|
||||
DECLARE
|
||||
task_count_line_reached boolean := false;
|
||||
BEGIN
|
||||
FOR query_plan IN execute explain_command LOOP
|
||||
IF (query_plan LIKE '%-> Result%' OR query_plan = 'Result') THEN
|
||||
CONTINUE;
|
||||
END IF;
|
||||
IF NOT task_count_line_reached THEN
|
||||
RETURN next;
|
||||
END IF;
|
||||
IF query_plan LIKE '%Task Count:%' THEN
|
||||
IF NOT task_count_line_reached THEN
|
||||
SELECT true INTO task_count_line_reached;
|
||||
ELSE
|
||||
RETURN next;
|
||||
END IF;
|
||||
END IF;
|
||||
END LOOP;
|
||||
RETURN;
|
||||
END; $$ language plpgsql;
|
||||
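-- A minimal usage sketch for the helper above (the wrapped query is hypothetical and only
-- illustrates the calling convention; whether extra subplans show up depends on the query):
-- everything up to the first "Task Count:" line is returned, and after that only the
-- "Task Count:" line of each additional subplan is kept.
SELECT coordinator_plan_with_subplans($Q$
EXPLAIN (COSTS FALSE)
WITH cte AS (SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY l_suppkey)
SELECT count(*) FROM cte;
$Q$);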
-- Create a function to ignore "-> Result" lines for PG15 support
|
||||
-- In PG15 there are some extra "-> Result" lines
|
||||
CREATE OR REPLACE FUNCTION plan_without_result_lines(explain_command text, out query_plan text)
|
||||
RETURNS SETOF TEXT AS $$
|
||||
BEGIN
|
||||
FOR query_plan IN execute explain_command LOOP
|
||||
IF (query_plan LIKE '%-> Result%' OR query_plan = 'Result') THEN
|
||||
CONTINUE;
|
||||
END IF;
|
||||
RETURN next;
|
||||
END LOOP;
|
||||
RETURN;
|
||||
END; $$ language plpgsql;
|
||||
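-- This is the helper invoked by the multi_select_distinct outputs above; a representative
-- call, matching those outputs, looks like:
SELECT public.plan_without_result_lines($Q$
EXPLAIN (COSTS FALSE)
  SELECT DISTINCT count(*)
  FROM lineitem_hash_part
  GROUP BY l_suppkey, l_linenumber
  ORDER BY 1;
$Q$);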
-- Create a function to normalize Memory Usage, Buckets, Batches
|
||||
CREATE OR REPLACE FUNCTION plan_normalize_memory(explain_command text, out query_plan text)
|
||||
RETURNS SETOF TEXT AS $$
|
||||
BEGIN
|
||||
FOR query_plan IN execute explain_command LOOP
|
||||
query_plan := regexp_replace(query_plan, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');
|
||||
RETURN NEXT;
|
||||
END LOOP;
|
||||
END; $$ language plpgsql;
|
||||
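-- A hedged usage sketch; the query and EXPLAIN options are illustrative only (ANALYZE is
-- what makes the memory/bucket/batch figures appear in the plan at all):
SELECT public.plan_normalize_memory($Q$
EXPLAIN (ANALYZE, COSTS FALSE, TIMING FALSE, SUMMARY FALSE)
  SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY l_suppkey;
$Q$);
-- so that a plan line such as "Buckets: 1024  Batches: 1  Memory Usage: 73kB"
-- comes back as "Buckets: xxx  Batches: xxx  Memory Usage: xxx"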
-- Create a function to remove arrows from the explain plan
|
||||
CREATE OR REPLACE FUNCTION plan_without_arrows(explain_command text, out query_plan text)
|
||||
RETURNS SETOF TEXT AS $$
|
||||
BEGIN
|
||||
FOR query_plan IN execute explain_command LOOP
|
||||
IF (query_plan LIKE '%-> Result%' OR query_plan = 'Result') THEN
|
||||
CONTINUE;
|
||||
END IF;
|
||||
query_plan := regexp_replace(query_plan, '( )*-> (.*)', '\2', 'g');
|
||||
RETURN NEXT;
|
||||
END LOOP;
|
||||
END; $$ language plpgsql;
|
||||
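-- Illustrative effect only: besides skipping "-> Result" lines, the regexp strips the
-- leading "-> " arrow and its indentation, so a task plan line such as
--   "-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part"
-- is returned as
--   "Seq Scan on lineitem_hash_part_360041 lineitem_hash_part"
SELECT public.plan_without_arrows($Q$
EXPLAIN (COSTS FALSE)
  SELECT count(*) FROM lineitem_hash_part;
$Q$);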
-- helper function that returns true if output of given explain has "is not null" (case-insensitive)
|
||||
CREATE OR REPLACE FUNCTION explain_has_is_not_null(explain_command text)
|
||||
RETURNS BOOLEAN AS $$
|
||||
|
|
|
@ -785,6 +785,7 @@ EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER
|
|||
Filter: ((value_1 >= 1) AND (value_1 < 3))
|
||||
(19 rows)
|
||||
|
||||
SELECT public.coordinator_plan($Q$
|
||||
EXPLAIN (COSTS FALSE) SELECT *
|
||||
FROM (
|
||||
(SELECT user_id FROM recent_users)
|
||||
|
@ -792,32 +793,14 @@ EXPLAIN (COSTS FALSE) SELECT *
|
|||
(SELECT user_id FROM selected_users) ) u
|
||||
WHERE user_id < 4 AND user_id > 1
|
||||
ORDER BY user_id;
|
||||
QUERY PLAN
|
||||
$Q$);
|
||||
coordinator_plan
|
||||
---------------------------------------------------------------------
|
||||
Sort
|
||||
Sort Key: remote_scan.user_id
|
||||
-> Custom Scan (Citus Adaptive)
|
||||
Task Count: 4
|
||||
Tasks Shown: One of 4
|
||||
-> Task
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Unique
|
||||
-> Sort
|
||||
Sort Key: recent_users.user_id
|
||||
-> Append
|
||||
-> Subquery Scan on recent_users
|
||||
-> Sort
|
||||
Sort Key: (max(users_table."time")) DESC
|
||||
-> GroupAggregate
|
||||
Group Key: users_table.user_id
|
||||
Filter: (max(users_table."time") > '2017-11-23 16:20:33.264457'::timestamp without time zone)
|
||||
-> Sort
|
||||
Sort Key: users_table.user_id
|
||||
-> Seq Scan on users_table_1400256 users_table
|
||||
Filter: ((user_id < 4) AND (user_id > 1))
|
||||
-> Seq Scan on users_table_1400256 users_table_1
|
||||
Filter: ((value_1 >= 1) AND (value_1 < 3) AND (user_id < 4) AND (user_id > 1))
|
||||
(23 rows)
|
||||
(4 rows)
|
||||
|
||||
EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10;
|
||||
QUERY PLAN
|
||||
|
|
|
@ -1,3 +1,17 @@
|
|||
--
|
||||
-- MX_COORDINATOR_SHOULDHAVESHARDS
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA mx_coordinator_shouldhaveshards;
|
||||
SET search_path TO mx_coordinator_shouldhaveshards;
|
||||
SET citus.shard_replication_factor to 1;
|
||||
|
@ -99,7 +113,7 @@ inserts AS (
|
|||
RETURNING *
|
||||
) SELECT count(*) FROM inserts;
|
||||
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM mx_coordinator_shouldhaveshards.table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2 (key, value) SELECT key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1 WHERE (key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2 (key, value) SELECT table_1.key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1 WHERE (table_1.key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY table_1.key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
|
||||
|
@ -160,7 +174,7 @@ inserts AS (
|
|||
RETURNING *
|
||||
) SELECT count(*) FROM inserts;
|
||||
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM mx_coordinator_shouldhaveshards.table_1_rep
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2_rep (key, value) SELECT key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1_rep WHERE (key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2_rep.key, table_2_rep.value
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2_rep (key, value) SELECT table_1_rep.key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1_rep WHERE (table_1_rep.key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY table_1_rep.key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2_rep.key, table_2_rep.value
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
|
||||
|
@ -225,7 +239,7 @@ inserts AS (
|
|||
RETURNING *
|
||||
) SELECT count(*) FROM inserts;
|
||||
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM mx_coordinator_shouldhaveshards.table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2 (key, value) SELECT key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1 WHERE (key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2 (key, value) SELECT table_1.key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1 WHERE (table_1.key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY table_1.key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
|
||||
|
@ -286,7 +300,7 @@ inserts AS (
|
|||
RETURNING *
|
||||
) SELECT count(*) FROM inserts;
|
||||
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM mx_coordinator_shouldhaveshards.table_1_rep
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2_rep (key, value) SELECT key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1_rep WHERE (key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2_rep.key, table_2_rep.value
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2_rep (key, value) SELECT table_1_rep.key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1_rep WHERE (table_1_rep.key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY table_1_rep.key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2_rep.key, table_2_rep.value
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
|
||||
|
|
|
@ -0,0 +1,331 @@
|
|||
--
|
||||
-- MX_COORDINATOR_SHOULDHAVESHARDS
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA mx_coordinator_shouldhaveshards;
|
||||
SET search_path TO mx_coordinator_shouldhaveshards;
|
||||
SET citus.shard_replication_factor to 1;
|
||||
SET client_min_messages TO WARNING;
|
||||
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
RESET client_min_messages;
|
||||
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- issue 4508 table_1 and table_2 are used to test some edge cases
|
||||
-- around intermediate result pruning
|
||||
CREATE TABLE table_1 (key int, value text);
|
||||
SELECT create_distributed_table('table_1', 'key', colocate_with := 'none');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE table_2 (key int, value text);
|
||||
SELECT create_distributed_table('table_2', 'key', colocate_with := 'none');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO table_1 VALUES (1, '1'), (2, '2'), (3, '3'), (4, '4');
|
||||
INSERT INTO table_2 VALUES (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6');
|
||||
SET citus.shard_replication_factor to 2;
|
||||
CREATE TABLE table_1_rep (key int, value text);
|
||||
SELECT create_distributed_table('table_1_rep', 'key', colocate_with := 'none');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE table_2_rep (key int, value text);
|
||||
SELECT create_distributed_table('table_2_rep', 'key', colocate_with := 'none');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO table_1_rep VALUES (1, '1'), (2, '2'), (3, '3'), (4, '4');
|
||||
INSERT INTO table_2_rep VALUES (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6');
|
||||
set citus.log_intermediate_results TO ON;
|
||||
set client_min_messages to debug1;
|
||||
WITH a AS (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
|
||||
SELECT count(*),
|
||||
key
|
||||
FROM a JOIN table_2 USING (key)
|
||||
GROUP BY key
|
||||
HAVING (max(table_2.value) >= (SELECT value FROM a));
|
||||
DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1 ORDER BY key, value DESC LIMIT 1
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1))
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
count | key
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
WITH a AS (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
|
||||
INSERT INTO table_1 SELECT count(*),
|
||||
key
|
||||
FROM a JOIN table_2 USING (key)
|
||||
GROUP BY key
|
||||
HAVING (max(table_2.value) >= (SELECT value FROM a));
|
||||
DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1 ORDER BY key, value DESC LIMIT 1
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT int4(count(*)) AS auto_coerced_by_citus_0, (a.key)::text AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_2 will be written to local file
|
||||
WITH stats AS (
|
||||
SELECT count(key) m FROM table_1
|
||||
),
|
||||
inserts AS (
|
||||
INSERT INTO table_2
|
||||
SELECT key, count(*)
|
||||
FROM table_1
|
||||
WHERE key >= (SELECT m FROM stats)
|
||||
GROUP BY key
|
||||
HAVING count(*) <= (SELECT m FROM stats)
|
||||
LIMIT 1
|
||||
RETURNING *
|
||||
) SELECT count(*) FROM inserts;
|
||||
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM mx_coordinator_shouldhaveshards.table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2 (key, value) SELECT key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1 WHERE (key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_2 will be written to local file
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
WITH a AS (SELECT * FROM table_1_rep ORDER BY 1,2 DESC LIMIT 1)
|
||||
SELECT count(*),
|
||||
key
|
||||
FROM a JOIN table_2_rep USING (key)
|
||||
GROUP BY key
|
||||
HAVING (max(table_2_rep.value) >= (SELECT value FROM a));
|
||||
DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1_rep ORDER BY key, value DESC LIMIT 1
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2_rep USING (key)) GROUP BY a.key HAVING (max(table_2_rep.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1))
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
count | key
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
WITH a AS (SELECT * FROM table_1_rep ORDER BY 1,2 DESC LIMIT 1)
|
||||
INSERT INTO table_1_rep SELECT count(*),
|
||||
key
|
||||
FROM a JOIN table_2_rep USING (key)
|
||||
GROUP BY key
|
||||
HAVING (max(table_2_rep.value) >= (SELECT value FROM a));
|
||||
DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1_rep ORDER BY key, value DESC LIMIT 1
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT int4(count(*)) AS auto_coerced_by_citus_0, (a.key)::text AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2_rep USING (key)) GROUP BY a.key HAVING (max(table_2_rep.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_2 will be written to local file
|
||||
WITH stats AS (
|
||||
SELECT count(key) m FROM table_1_rep
|
||||
),
|
||||
inserts AS (
|
||||
INSERT INTO table_2_rep
|
||||
SELECT key, count(*)
|
||||
FROM table_1_rep
|
||||
WHERE key >= (SELECT m FROM stats)
|
||||
GROUP BY key
|
||||
HAVING count(*) <= (SELECT m FROM stats)
|
||||
LIMIT 1
|
||||
RETURNING *
|
||||
) SELECT count(*) FROM inserts;
|
||||
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM mx_coordinator_shouldhaveshards.table_1_rep
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2_rep (key, value) SELECT key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1_rep WHERE (key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2_rep.key, table_2_rep.value
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_2 will be written to local file
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SET search_path TO mx_coordinator_shouldhaveshards;
|
||||
set citus.log_intermediate_results TO ON;
|
||||
set client_min_messages to debug1;
|
||||
WITH a AS (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
|
||||
SELECT count(*),
|
||||
key
|
||||
FROM a JOIN table_2 USING (key)
|
||||
GROUP BY key
|
||||
HAVING (max(table_2.value) >= (SELECT value FROM a));
|
||||
DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1 ORDER BY key, value DESC LIMIT 1
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1))
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
count | key
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
WITH a AS (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
|
||||
INSERT INTO table_1 SELECT count(*),
|
||||
key
|
||||
FROM a JOIN table_2 USING (key)
|
||||
GROUP BY key
|
||||
HAVING (max(table_2.value) >= (SELECT value FROM a));
|
||||
DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1 ORDER BY key, value DESC LIMIT 1
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT int4(count(*)) AS auto_coerced_by_citus_0, (a.key)::text AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_2 will be written to local file
|
||||
WITH stats AS (
|
||||
SELECT count(key) m FROM table_1
|
||||
),
|
||||
inserts AS (
|
||||
INSERT INTO table_2
|
||||
SELECT key, count(*)
|
||||
FROM table_1
|
||||
WHERE key >= (SELECT m FROM stats)
|
||||
GROUP BY key
|
||||
HAVING count(*) <= (SELECT m FROM stats)
|
||||
LIMIT 1
|
||||
RETURNING *
|
||||
) SELECT count(*) FROM inserts;
|
||||
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM mx_coordinator_shouldhaveshards.table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2 (key, value) SELECT key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1 WHERE (key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2.key, table_2.value
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_2 will be written to local file
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
WITH a AS (SELECT * FROM table_1_rep ORDER BY 1,2 DESC LIMIT 1)
SELECT count(*),
       key
FROM a JOIN table_2_rep USING (key)
GROUP BY key
HAVING (max(table_2_rep.value) >= (SELECT value FROM a));
DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1_rep ORDER BY key, value DESC LIMIT 1
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2_rep USING (key)) GROUP BY a.key HAVING (max(table_2_rep.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1))
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
 count | key
---------------------------------------------------------------------
     1 |   1
(1 row)

WITH a AS (SELECT * FROM table_1_rep ORDER BY 1,2 DESC LIMIT 1)
INSERT INTO table_1_rep SELECT count(*),
       key
FROM a JOIN table_2_rep USING (key)
GROUP BY key
HAVING (max(table_2_rep.value) >= (SELECT value FROM a));
DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1_rep ORDER BY key, value DESC LIMIT 1
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT int4(count(*)) AS auto_coerced_by_citus_0, (a.key)::text AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2_rep USING (key)) GROUP BY a.key HAVING (max(table_2_rep.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_2 will be written to local file
|
||||
WITH stats AS (
  SELECT count(key) m FROM table_1_rep
),
inserts AS (
  INSERT INTO table_2_rep
  SELECT key, count(*)
  FROM table_1_rep
  WHERE key >= (SELECT m FROM stats)
  GROUP BY key
  HAVING count(*) <= (SELECT m FROM stats)
  LIMIT 1
  RETURNING *
) SELECT count(*) FROM inserts;
DEBUG: generating subplan XXX_1 for CTE stats: SELECT count(key) AS m FROM mx_coordinator_shouldhaveshards.table_1_rep
|
||||
DEBUG: generating subplan XXX_2 for CTE inserts: INSERT INTO mx_coordinator_shouldhaveshards.table_2_rep (key, value) SELECT key, count(*) AS count FROM mx_coordinator_shouldhaveshards.table_1_rep WHERE (key OPERATOR(pg_catalog.>=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) GROUP BY key HAVING (count(*) OPERATOR(pg_catalog.<=) (SELECT stats.m FROM (SELECT intermediate_result.m FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(m bigint)) stats)) LIMIT 1 RETURNING table_2_rep.key, table_2_rep.value
|
||||
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: push down of limit count: 1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) inserts
|
||||
DEBUG: Subplan XXX_1 will be written to local file
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_2 will be written to local file
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
 count
---------------------------------------------------------------------
     0
(1 row)

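The *_rep variants exercise the same plans against replicated shards; presumably they are created along these lines (a sketch only, the actual DDL is outside this hunk):
SET citus.shard_replication_factor TO 2;
CREATE TABLE table_1_rep (key int, value text);
CREATE TABLE table_2_rep (key int, value text);
SELECT create_distributed_table('table_1_rep', 'key');
SELECT create_distributed_table('table_2_rep', 'key');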
\c - - - :master_port
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', false);
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SET client_min_messages TO ERROR;
DROP SCHEMA mx_coordinator_shouldhaveshards CASCADE;
SELECT master_remove_node('localhost', :master_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)
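This cleanup undoes a setup in which the coordinator itself holds shards; roughly, and only as a sketch since the setup lies outside this hunk, that looks like:
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);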
@@ -588,8 +588,12 @@ NOTICE: renaming the new table to test_pg12.generated_stored_ref
ROLLBACK;
|
||||
BEGIN;
|
||||
-- drop some of the columns not having "generated always as stored" expressions
|
||||
-- this would drop generated columns too
|
||||
ALTER TABLE generated_stored_ref DROP COLUMN col_1;
|
||||
-- PRE PG15, this would drop generated columns too
|
||||
-- In PG15, CASCADE option must be specified
|
||||
-- Relevant PG Commit: cb02fcb4c95bae08adaca1202c2081cfc81a28b5
|
||||
SET client_min_messages TO WARNING;
|
||||
ALTER TABLE generated_stored_ref DROP COLUMN col_1 CASCADE;
|
||||
RESET client_min_messages;
|
||||
ALTER TABLE generated_stored_ref DROP COLUMN col_4;
|
||||
-- show that undistribute_table works fine
|
||||
SELECT undistribute_table('generated_stored_ref');
|
||||
|
|
|
@@ -279,7 +279,7 @@ SELECT create_distributed_table('col_compression', 'a', shard_count:=4);
|
|||
|
||||
(1 row)
|
||||
|
||||
SELECT attname || ' ' || attcompression AS column_compression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'col\_compression%' AND attnum > 0 ORDER BY 1;
|
||||
SELECT attname || ' ' || attcompression::text AS column_compression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'col\_compression%' AND attnum > 0 ORDER BY 1;
|
||||
column_compression
|
||||
---------------------------------------------------------------------
|
||||
a p
|
||||
|
@@ -287,7 +287,7 @@ SELECT attname || ' ' || attcompression AS column_compression FROM pg_attribute
|
|||
(2 rows)
|
||||
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
column_compression
|
||||
---------------------------------------------------------------------
|
||||
|
@@ -313,7 +313,7 @@ NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
|
|||
CALL citus_cleanup_orphaned_shards();
|
||||
NOTICE: cleaned up 1 orphaned shards
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
column_compression
|
||||
---------------------------------------------------------------------
|
||||
|
@@ -325,7 +325,7 @@ SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regcla
|
|||
ALTER TABLE col_compression ALTER COLUMN b SET COMPRESSION pglz;
|
||||
ALTER TABLE col_compression ALTER COLUMN a SET COMPRESSION default;
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
column_compression
|
||||
---------------------------------------------------------------------
|
||||
|
@@ -336,7 +336,7 @@ SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regcla
|
|||
-- test propagation of ALTER TABLE .. ADD COLUMN .. COMPRESSION ..
|
||||
ALTER TABLE col_compression ADD COLUMN c TEXT COMPRESSION pglz;
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
column_compression
|
||||
---------------------------------------------------------------------
|
||||
|
@@ -354,7 +354,7 @@ SELECT create_distributed_table('col_comp_par', 'a');
|
|||
|
||||
CREATE TABLE col_comp_par_1 PARTITION OF col_comp_par FOR VALUES FROM ('abc') TO ('def');
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_comp\_par\_1\_%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_comp\_par\_1\_%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
column_compression
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
@@ -0,0 +1,250 @@
--
-- PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
\endif
CREATE SCHEMA pg15;
SET search_path TO pg15;
SET citus.next_shard_id TO 960000;
SET citus.shard_count TO 4;
--
-- In PG15, there is an added option to use ICU as global locale provider.
-- pg_collation has three locale-related fields: collcollate and collctype,
-- which are libc-related fields, and a new one colliculocale, which is the
-- ICU-related field. Only the libc-related fields or the ICU-related field
-- is set, never both.
-- Relevant PG commits:
-- f2553d43060edb210b36c63187d52a632448e1d2
-- 54637508f87bd5f07fb9406bac6b08240283be3b
--
-- fail, needs "locale"
CREATE COLLATION german_phonebook_test (provider = icu, lc_collate = 'de-u-co-phonebk');
ERROR: parameter "locale" must be specified
-- fail, needs "locale"
CREATE COLLATION german_phonebook_test (provider = icu, lc_collate = 'de-u-co-phonebk', lc_ctype = 'de-u-co-phonebk');
ERROR: parameter "locale" must be specified
-- works
CREATE COLLATION german_phonebook_test (provider = icu, locale = 'de-u-co-phonebk');
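One way to see which of the two locale representations a collation uses is to read the three catalog fields side by side; a small illustrative query against the PG15 catalog shape described in the comment above:
SELECT collname, collprovider, collcollate, collctype, colliculocale
FROM pg_collation
WHERE collname = 'german_phonebook_test';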
-- with icu provider, colliculocale will be set, collcollate and collctype will be null
|
||||
SELECT result FROM run_command_on_all_nodes('
|
||||
SELECT collcollate FROM pg_collation WHERE collname = ''german_phonebook_test'';
|
||||
');
|
||||
result
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
|
||||
(3 rows)
|
||||
|
||||
SELECT result FROM run_command_on_all_nodes('
|
||||
SELECT collctype FROM pg_collation WHERE collname = ''german_phonebook_test'';
|
||||
');
|
||||
result
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
|
||||
(3 rows)
|
||||
|
||||
SELECT result FROM run_command_on_all_nodes('
|
||||
SELECT colliculocale FROM pg_collation WHERE collname = ''german_phonebook_test'';
|
||||
');
|
||||
result
|
||||
---------------------------------------------------------------------
|
||||
de-u-co-phonebk
|
||||
de-u-co-phonebk
|
||||
de-u-co-phonebk
|
||||
(3 rows)
|
||||
|
||||
-- with non-icu provider, colliculocale will be null, collcollate and collctype will be set
|
||||
CREATE COLLATION default_provider (provider = libc, lc_collate = "POSIX", lc_ctype = "POSIX");
|
||||
SELECT result FROM run_command_on_all_nodes('
|
||||
SELECT collcollate FROM pg_collation WHERE collname = ''default_provider'';
|
||||
');
|
||||
result
|
||||
---------------------------------------------------------------------
|
||||
POSIX
|
||||
POSIX
|
||||
POSIX
|
||||
(3 rows)
|
||||
|
||||
SELECT result FROM run_command_on_all_nodes('
|
||||
SELECT collctype FROM pg_collation WHERE collname = ''default_provider'';
|
||||
');
|
||||
result
|
||||
---------------------------------------------------------------------
|
||||
POSIX
|
||||
POSIX
|
||||
POSIX
|
||||
(3 rows)
|
||||
|
||||
SELECT result FROM run_command_on_all_nodes('
|
||||
SELECT colliculocale FROM pg_collation WHERE collname = ''default_provider'';
|
||||
');
|
||||
result
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
|
||||
(3 rows)
|
||||
|
||||
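Since the checks above show the collation present on every node, it can also be used in distributed tables; a hypothetical example (the table and data are not part of the test):
CREATE TABLE phonebook (id bigint, full_name text COLLATE german_phonebook_test);
SELECT create_distributed_table('phonebook', 'id');
SELECT full_name FROM phonebook ORDER BY full_name;  -- sorted with the ICU phonebook rules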
--
-- In PG15, renaming a trigger on a partitioned table
-- recurses to renaming the triggers on the partitions as well.
-- Here we test that distributed triggers behave the same way.
-- Relevant PG commit:
-- 80ba4bb383538a2ee846fece6a7b8da9518b6866
--
SET citus.enable_unsafe_triggers TO true;
CREATE TABLE sale(
    sale_date date not null,
    state_code text,
    product_sku text,
    units integer)
PARTITION BY list (state_code);
ALTER TABLE sale ADD CONSTRAINT sale_pk PRIMARY KEY (state_code, sale_date);
|
||||
CREATE TABLE sale_newyork PARTITION OF sale FOR VALUES IN ('NY');
|
||||
CREATE TABLE sale_california PARTITION OF sale FOR VALUES IN ('CA');
|
||||
CREATE TABLE record_sale(
|
||||
operation_type text not null,
|
||||
product_sku text,
|
||||
state_code text,
|
||||
units integer,
|
||||
PRIMARY KEY(state_code, product_sku, operation_type, units));
|
||||
SELECT create_distributed_table('sale', 'state_code');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('record_sale', 'state_code', colocate_with := 'sale');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION record_sale()
|
||||
RETURNS trigger
|
||||
AS $$
|
||||
BEGIN
|
||||
INSERT INTO pg15.record_sale(operation_type, product_sku, state_code, units)
|
||||
VALUES (TG_OP, NEW.product_sku, NEW.state_code, NEW.units);
|
||||
RETURN NULL;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
CREATE TRIGGER record_sale_trigger
|
||||
AFTER INSERT OR UPDATE OR DELETE ON sale
|
||||
FOR EACH ROW EXECUTE FUNCTION pg15.record_sale();
|
||||
CREATE VIEW sale_triggers AS
|
||||
SELECT tgname, tgrelid::regclass, tgenabled
|
||||
FROM pg_trigger
|
||||
WHERE tgrelid::regclass::text like 'sale%'
|
||||
ORDER BY 1, 2;
|
||||
SELECT * FROM sale_triggers ORDER BY 1, 2;
|
||||
tgname | tgrelid | tgenabled
|
||||
---------------------------------------------------------------------
|
||||
record_sale_trigger | sale | O
|
||||
record_sale_trigger | sale_newyork | O
|
||||
record_sale_trigger | sale_california | O
|
||||
truncate_trigger_xxxxxxx | sale | O
|
||||
truncate_trigger_xxxxxxx | sale_california | O
|
||||
truncate_trigger_xxxxxxx | sale_newyork | O
|
||||
(6 rows)
|
||||
|
||||
ALTER TRIGGER "record_sale_trigger" ON "pg15"."sale" RENAME TO "new_record_sale_trigger";
|
||||
SELECT * FROM sale_triggers ORDER BY 1, 2;
|
||||
tgname | tgrelid | tgenabled
|
||||
---------------------------------------------------------------------
|
||||
new_record_sale_trigger | sale | O
|
||||
new_record_sale_trigger | sale_newyork | O
|
||||
new_record_sale_trigger | sale_california | O
|
||||
truncate_trigger_xxxxxxx | sale | O
|
||||
truncate_trigger_xxxxxxx | sale_california | O
|
||||
truncate_trigger_xxxxxxx | sale_newyork | O
|
||||
(6 rows)
|
||||
|
||||
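PG15 also stops the inherited trigger from being renamed directly on a partition; the expectation (a sketch, this statement is not part of the hunk) is an error pointing back at the parent table:
ALTER TRIGGER "new_record_sale_trigger" ON "pg15"."sale_newyork" RENAME TO "renamed_on_child";
-- expected to fail with a hint to rename the trigger on "sale" instead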
--
-- In PG15, for GENERATED columns, all dependencies of the generation
-- expression are recorded as NORMAL dependencies of the column itself.
-- This requires CASCADE to drop generated cols with the original col.
-- Test this behavior on a distributed table, specifically with
-- undistribute_table within a transaction.
-- Relevant PG Commit: cb02fcb4c95bae08adaca1202c2081cfc81a28b5
--
CREATE TABLE generated_stored_ref (
    col_1 int,
    col_2 int,
    col_3 int generated always as (col_1+col_2) stored,
    col_4 int,
    col_5 int generated always as (col_4*2-col_1) stored
);
SELECT create_reference_table ('generated_stored_ref');
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- populate the table
|
||||
INSERT INTO generated_stored_ref (col_1, col_4) VALUES (1,2), (11,12);
|
||||
INSERT INTO generated_stored_ref (col_1, col_2, col_4) VALUES (100,101,102), (200,201,202);
|
||||
SELECT * FROM generated_stored_ref ORDER BY 1,2,3,4,5;
|
||||
col_1 | col_2 | col_3 | col_4 | col_5
|
||||
---------------------------------------------------------------------
|
||||
1 | | | 2 | 3
|
||||
11 | | | 12 | 13
|
||||
100 | 101 | 201 | 102 | 104
|
||||
200 | 201 | 401 | 202 | 204
|
||||
(4 rows)
|
||||
|
||||
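As a quick check of the generated values above: col_3 is col_1 + col_2 and col_5 is col_4 * 2 - col_1, so the first row, inserted with (col_1, col_4) = (1, 2), yields a NULL col_3 and col_5 = 3:
SELECT 1 + NULL::int AS col_3, 2 * 2 - 1 AS col_5;  -- => NULL, 3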
-- fails, CASCADE must be specified
-- will test CASCADE inside the transaction
ALTER TABLE generated_stored_ref DROP COLUMN col_1;
|
||||
ERROR: cannot drop column col_1 of table generated_stored_ref because other objects depend on it
|
||||
DETAIL: column col_3 of table generated_stored_ref depends on column col_1 of table generated_stored_ref
|
||||
column col_5 of table generated_stored_ref depends on column col_1 of table generated_stored_ref
|
||||
HINT: Use DROP ... CASCADE to drop the dependent objects too.
|
||||
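The NORMAL dependencies that force the CASCADE above can be inspected in pg_depend; an illustrative query (column interpretation per the comment earlier, deptype 'n' = NORMAL):
SELECT objsubid    AS generated_column_attnum,
       refobjsubid AS referenced_column_attnum,
       deptype
FROM pg_depend
WHERE classid = 'pg_class'::regclass
  AND objid = 'generated_stored_ref'::regclass
  AND refobjid = 'generated_stored_ref'::regclass
  AND deptype = 'n';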
BEGIN;
|
||||
-- drops col_1, col_3, col_5
|
||||
ALTER TABLE generated_stored_ref DROP COLUMN col_1 CASCADE;
|
||||
NOTICE: drop cascades to 2 other objects
|
||||
DETAIL: drop cascades to column col_3 of table generated_stored_ref
|
||||
drop cascades to column col_5 of table generated_stored_ref
|
||||
ALTER TABLE generated_stored_ref DROP COLUMN col_4;
|
||||
-- show that undistribute_table works fine
|
||||
SELECT undistribute_table('generated_stored_ref');
|
||||
NOTICE: creating a new table for pg15.generated_stored_ref
|
||||
NOTICE: moving the data of pg15.generated_stored_ref
|
||||
NOTICE: dropping the old pg15.generated_stored_ref
|
||||
NOTICE: renaming the new table to pg15.generated_stored_ref
|
||||
undistribute_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO generated_stored_ref VALUES (5);
|
||||
SELECT * FROM generated_stored_REF ORDER BY 1;
|
||||
col_2
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
101
|
||||
201
|
||||
|
||||
|
||||
(5 rows)
|
||||
|
||||
ROLLBACK;
|
||||
-- Clean up
|
||||
DROP SCHEMA pg15 CASCADE;
|
||||
NOTICE: drop cascades to 7 other objects
|
||||
DETAIL: drop cascades to collation german_phonebook_test
|
||||
drop cascades to collation default_provider
|
||||
drop cascades to table sale
|
||||
drop cascades to table record_sale
|
||||
drop cascades to function record_sale()
|
||||
drop cascades to view sale_triggers
|
||||
drop cascades to table generated_stored_ref
|
|
@@ -0,0 +1,9 @@
--
-- PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
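On a server older than 15 the guard above evaluates to false, \if takes the \else branch, and \q ends the script, so this short prologue is the entire alternative output. The predicate itself can be checked in isolation (illustrative value):
SELECT substring('14.5', '\d+')::int >= 15 AS server_version_ge_15;  -- f on a PG14 server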
@@ -533,7 +533,7 @@ DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.t
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT 1, 1 FROM recursive_union.test
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
x | y | y
|
||||
|
@@ -1013,7 +1013,7 @@ DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.t
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT 1, 1 FROM recursive_union.test
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT u.x, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, test.y) set_view_recursive_second ORDER BY x, y
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
x | y
|
||||
|
@@ -1030,7 +1030,7 @@ DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.t
|
|||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT 1, 1 FROM recursive_union.test
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan XXX_4 for subquery SELECT y FROM recursive_union.test
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
|
|
|
@@ -1,3 +1,17 @@
|
|||
--
-- SINGLE_NODE
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14.
--
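Concretely, the pairs of NOTICE lines further down show the kind of difference the alternative output captures: when the shard-level INSERT ... SELECT is deparsed, PG15 prints the selected columns qualified with their range-table alias while PG14 printed them bare, for example (abridged from the hunks below):
-- PG14: ... SELECT a, b FROM read_intermediate_result(...) intermediate_result(a integer, b integer)
-- PG15: ... SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result(...) intermediate_result(a integer, b integer)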
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA single_node;
|
||||
SET search_path TO single_node;
|
||||
SET citus.shard_count TO 4;
|
||||
|
@@ -1905,10 +1919,10 @@ NOTICE: executing the command locally: UPDATE single_node.another_schema_table_
|
|||
-- note that we ignore INSERT .. SELECT via coordinator as it relies on
-- COPY command
INSERT INTO another_schema_table SELECT * FROM another_schema_table;
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE (a IS NOT NULL)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE (a IS NOT NULL)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE (a IS NOT NULL)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE (a IS NOT NULL)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT another_schema_table.a, another_schema_table.b FROM single_node.another_schema_table_90630515 another_schema_table WHERE (another_schema_table.a IS NOT NULL)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT another_schema_table.a, another_schema_table.b FROM single_node.another_schema_table_90630516 another_schema_table WHERE (another_schema_table.a IS NOT NULL)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT another_schema_table.a, another_schema_table.b FROM single_node.another_schema_table_90630517 another_schema_table WHERE (another_schema_table.a IS NOT NULL)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT another_schema_table.a, another_schema_table.b FROM single_node.another_schema_table_90630518 another_schema_table WHERE (another_schema_table.a IS NOT NULL)
|
||||
INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table;
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630515_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630515_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630516_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630516_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
|
@@ -1931,10 +1945,10 @@ NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_r
|
|||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630516_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630516_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630517_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630517_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630518_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630518_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630516_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_2,repartitioned_results_xxxxx_from_90630517_to_2,repartitioned_results_xxxxx_from_90630518_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630518_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630516_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_2,repartitioned_results_xxxxx_from_90630517_to_2,repartitioned_results_xxxxx_from_90630518_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630518_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
|
||||
SELECT * FROM another_schema_table WHERE a = 100 ORDER BY b;
|
||||
NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 100) ORDER BY b
|
||||
a | b
|
||||
|
@@ -1986,10 +2000,10 @@ NOTICE: executing the copy locally for colocated file with shard xxxxx
|
|||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
|
||||
INSERT INTO another_schema_table SELECT * FROM another_schema_table ORDER BY a LIMIT 10 ON CONFLICT(a) DO UPDATE SET b = EXCLUDED.b + 1 RETURNING *;
|
||||
NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint
|
||||
NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint
|
||||
|
@@ -1999,10 +2013,10 @@ NOTICE: executing the copy locally for colocated file with shard xxxxx
|
|||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
1 |
|
||||
|
@@ -2029,10 +2043,10 @@ NOTICE: executing the copy locally for colocated file with shard xxxxx
|
|||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value) SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value) SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value) SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value) SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(value single_node.new_type)) cte_1
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
|
@@ -2056,10 +2070,10 @@ NOTICE: executing the copy locally for colocated file with shard xxxxx
|
|||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: SELECT bool_and((z IS NULL)) AS bool_and FROM (SELECT intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(z integer)) cte_1
|
||||
bool_and
|
||||
---------------------------------------------------------------------
|
||||
|
@@ -2078,10 +2092,10 @@ NOTICE: executing the copy locally for colocated file with shard xxxxx
|
|||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
|
||||
NOTICE: executing the command locally: SELECT count(DISTINCT (key)::text) AS count, count(DISTINCT (z)::text) AS count FROM (SELECT intermediate_result.key, intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, z integer)) cte_1
|
||||
count | count
|
||||
---------------------------------------------------------------------
|
||||
|
@@ -2158,10 +2172,10 @@ NOTICE: executing the copy locally for colocated file with shard xxxxx
|
|||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
|
@ -2179,10 +2193,10 @@ NOTICE: executing the copy locally for colocated file with shard xxxxx
|
|||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
|
||||
NOTICE: executing the command locally: SELECT bool_and((z IS NULL)) AS bool_and FROM (SELECT intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(z integer)) cte_1
|
||||
bool_and
|
||||
---------------------------------------------------------------------
|
||||
|
|
File diff suppressed because it is too large
|
@ -120,7 +120,7 @@ DEBUG: push down of limit count: 3
|
|||
DEBUG: generating subplan XXX_2 for subquery SELECT (min(value_3) OPERATOR(pg_catalog.*) (2)::double precision), (max(value_3) OPERATOR(pg_catalog./) (2)::double precision), sum(value_3) AS sum, count(value_3) AS count, avg(value_3) AS avg FROM public.users_table ORDER BY (min(value_3) OPERATOR(pg_catalog.*) (2)::double precision) DESC LIMIT 3
|
||||
DEBUG: push down of limit count: 3
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT min("time") AS min, max("time") AS max, count("time") AS count, count(*) FILTER (WHERE (user_id OPERATOR(pg_catalog.=) 3)) AS cnt_with_filter, count(*) FILTER (WHERE ((user_id)::text OPERATOR(pg_catalog.~~) '%3%'::text)) AS cnt_with_filter_2 FROM public.users_table ORDER BY (min("time")) DESC LIMIT 3
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo."?column?", foo."?column?_1" AS "?column?", foo.sum, foo.count, foo.avg, bar."?column?", bar."?column?_1" AS "?column?", bar.sum, bar.count, bar.avg, baz.min, baz.max, baz.count, baz.cnt_with_filter, baz.cnt_with_filter_2 FROM (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer, sum bigint, count double precision, avg bigint)) foo("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" double precision, "?column?_1" double precision, sum double precision, count bigint, avg double precision)) bar("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result.min, intermediate_result.max, intermediate_result.count, intermediate_result.cnt_with_filter, intermediate_result.cnt_with_filter_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(min timestamp without time zone, max timestamp without time zone, count bigint, cnt_with_filter bigint, cnt_with_filter_2 bigint)) baz ORDER BY foo."?column?" DESC
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo."?column?", foo."?column?_1", foo.sum, foo.count, foo.avg, bar."?column?", bar."?column?_1", bar.sum, bar.count, bar.avg, baz.min, baz.max, baz.count, baz.cnt_with_filter, baz.cnt_with_filter_2 FROM (SELECT intermediate_result."?column?", intermediate_result."?column?_1", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer, sum bigint, count double precision, avg bigint)) foo("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result."?column?", intermediate_result."?column?_1", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" double precision, "?column?_1" double precision, sum double precision, count bigint, avg double precision)) bar("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result.min, intermediate_result.max, intermediate_result.count, intermediate_result.cnt_with_filter, intermediate_result.cnt_with_filter_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(min timestamp without time zone, max timestamp without time zone, count bigint, cnt_with_filter bigint, cnt_with_filter_2 bigint)) baz ORDER BY foo."?column?" DESC
|
||||
?column? | ?column? | sum | count | avg | ?column? | ?column? | sum | count | avg | min | max | count | cnt_with_filter | cnt_with_filter_2
|
||||
---------------------------------------------------------------------
|
||||
2 | 3 | 376 | 101 | 4 | 0 | 2.5 | 273 | 101 | 2.7029702970297 | Wed Nov 22 18:19:49.944985 2017 | Thu Nov 23 17:30:34.635085 2017 | 101 | 17 | 17
|
||||
|
|
|
@ -476,7 +476,7 @@ DEBUG: generating subplan XXX_2 for subquery SELECT user_id AS user_id_2 FROM p
|
|||
DEBUG: generating subplan XXX_3 for subquery SELECT value_2 FROM public.events_table
|
||||
DEBUG: generating subplan XXX_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))
|
||||
DEBUG: generating subplan XXX_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) user_id)
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)))
|
||||
sum
|
||||
---------------------------------------------------------------------
|
||||
67
|
||||
|
@ -530,7 +530,7 @@ DEBUG: generating subplan XXX_2 for subquery SELECT user_id AS user_id_2 FROM p
|
|||
DEBUG: generating subplan XXX_3 for subquery SELECT value_2 FROM public.events_table
|
||||
DEBUG: generating subplan XXX_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))
|
||||
DEBUG: generating subplan XXX_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) (user_id OPERATOR(pg_catalog.+) 6))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (NOT (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer))))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (NOT (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer))))
|
||||
sum
|
||||
---------------------------------------------------------------------
|
||||
67
|
||||
|
@ -558,7 +558,7 @@ WHERE row(user_id, value_1) =
|
|||
DEBUG: generating subplan XXX_1 for subquery SELECT (min(user_id) OPERATOR(pg_catalog.+) 1), (min(user_id) OPERATOR(pg_catalog.+) 1) FROM public.events_table
|
||||
DEBUG: push down of limit count: 10
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT user_id, value_1 FROM public.users_table ORDER BY user_id, value_1 LIMIT 10
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, value_1 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) t3 WHERE ((user_id, value_1) OPERATOR(pg_catalog.=) (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)))
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, value_1 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) t3 WHERE ((user_id, value_1) OPERATOR(pg_catalog.=) (SELECT intermediate_result."?column?", intermediate_result."?column?_1" FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)))
|
||||
user_id | value_1
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
|
|
@@ -578,11 +578,13 @@ SELECT create_reference_table('reference_table');

(1 row)

SELECT public.coordinator_plan_with_subplans($Q$
EXPLAIN (COSTS OFF) WITH cte AS (
SELECT application_name AS text_col
FROM pg_stat_activity
) SELECT * FROM reference_table JOIN cte USING (text_col);
QUERY PLAN
$Q$);
coordinator_plan_with_subplans
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-> Distributed Subplan XXX_1
@@ -590,38 +592,17 @@ EXPLAIN (COSTS OFF) WITH cte AS (
-> Distributed Subplan XXX_2
-> Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Hash Left Join
Hash Cond: (intermediate_result.usesysid = u.oid)
-> Hash Left Join
Hash Cond: (intermediate_result.datid = d.oid)
-> Function Scan on read_intermediate_result intermediate_result
-> Hash
-> Seq Scan on pg_database d
-> Hash
-> Seq Scan on pg_authid u
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Merge Join
Merge Cond: (intermediate_result.application_name = reference_table.text_col)
-> Sort
Sort Key: intermediate_result.application_name
-> Function Scan on read_intermediate_result intermediate_result
-> Sort
Sort Key: reference_table.text_col
-> Seq Scan on reference_table_1512000 reference_table
(30 rows)
(7 rows)

CREATE OR REPLACE VIEW view_on_views AS SELECT pg_stat_activity.application_name, pg_locks.pid FROM pg_stat_activity, pg_locks;
SELECT public.coordinator_plan_with_subplans($Q$
EXPLAIN (COSTS OFF) WITH cte AS (
SELECT application_name AS text_col
FROM view_on_views
) SELECT * FROM reference_table JOIN cte USING (text_col);
QUERY PLAN
$Q$);
coordinator_plan_with_subplans
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-> Distributed Subplan XXX_1
@@ -629,18 +610,7 @@ EXPLAIN (COSTS OFF) WITH cte AS (
-> Function Scan on pg_stat_get_activity s
-> Function Scan on pg_lock_status l
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Merge Join
Merge Cond: (intermediate_result.text_col = reference_table.text_col)
-> Sort
Sort Key: intermediate_result.text_col
-> Function Scan on read_intermediate_result intermediate_result
-> Sort
Sort Key: reference_table.text_col
-> Seq Scan on reference_table_1512000 reference_table
(17 rows)
(6 rows)

DROP SCHEMA subquery_view CASCADE;
NOTICE: drop cascades to 19 other objects

@@ -1491,10 +1491,12 @@ LIMIT 5;
(17 rows)

-- Grouping can be pushed down with aggregates even when window function can't
SELECT public.plan_without_result_lines($Q$
EXPLAIN (COSTS FALSE)
SELECT user_id, count(value_1), stddev(value_1), count(user_id) OVER (PARTITION BY random())
FROM users_table GROUP BY user_id HAVING avg(value_1) > 2 LIMIT 1;
QUERY PLAN
$Q$);
plan_without_result_lines
---------------------------------------------------------------------
Limit
-> WindowAgg
@@ -1495,10 +1495,12 @@ LIMIT 5;
(18 rows)

-- Grouping can be pushed down with aggregates even when window function can't
SELECT public.plan_without_result_lines($Q$
EXPLAIN (COSTS FALSE)
SELECT user_id, count(value_1), stddev(value_1), count(user_id) OVER (PARTITION BY random())
FROM users_table GROUP BY user_id HAVING avg(value_1) > 2 LIMIT 1;
QUERY PLAN
$Q$);
plan_without_result_lines
---------------------------------------------------------------------
Limit
-> WindowAgg

@@ -58,6 +58,7 @@ test: cte_inline recursive_view_local_table values sequences_with_different_type
test: pg13 pg12
# run pg14 sequentially as it syncs metadata
test: pg14
test: pg15
test: drop_column_partitioned_table
test: tableam

@@ -1,3 +1,13 @@
--
-- CITUS_LOCAL_TABLES_QUERIES
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

\set VERBOSITY terse

SET citus.next_shard_id TO 1509000;

@@ -130,11 +130,15 @@ INSERT INTO another_columnar_table SELECT generate_series(0,5);
EXPLAIN (analyze on, costs off, timing off, summary off)
SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1;

SELECT plan_without_arrows($Q$
EXPLAIN (costs off, timing off, summary off)
SELECT y, * FROM another_columnar_table;
$Q$);

SELECT plan_without_arrows($Q$
EXPLAIN (costs off, timing off, summary off)
SELECT *, x FROM another_columnar_table;
$Q$);

EXPLAIN (costs off, timing off, summary off)
SELECT y, another_columnar_table FROM another_columnar_table;

@@ -428,11 +428,15 @@ SELECT create_distributed_table('weird_col_explain', 'bbbbbbbbbbbbbbbbbbbbbbbbb\
EXPLAIN (COSTS OFF, SUMMARY OFF)
SELECT * FROM weird_col_explain;

\set VERBOSITY terse
SELECT public.plan_without_result_lines($Q$
EXPLAIN (COSTS OFF, SUMMARY OFF)
SELECT *, "bbbbbbbbbbbbbbbbbbbbbbbbb\!bbbb'bbbbbbbbbbbbbbbbbbbbb''bbbbbbbb"
FROM weird_col_explain
WHERE "bbbbbbbbbbbbbbbbbbbbbbbbb\!bbbb'bbbbbbbbbbbbbbbbbbbbb''bbbbbbbb" * 2 >
"aaaaaaaaaaaa$aaaaaa$$aaaaaaaaaaaaaaaaaaaaaaaaaaaaa'aaaaaaaa'$a'!";
$Q$);
\set VERBOSITY default

-- should not project any columns
EXPLAIN (COSTS OFF, SUMMARY OFF)

@@ -1,4 +1,14 @@
--
-- COORDINATOR_SHOULDHAVESHARDS
--
-- Test queries on a distributed table with shards on the coordinator
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

CREATE SCHEMA coordinator_shouldhaveshards;
SET search_path TO coordinator_shouldhaveshards;

@@ -1,3 +1,13 @@
--
-- CTE_INLINE
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

CREATE SCHEMA cte_inline;
SET search_path TO cte_inline;
SET citus.next_shard_id TO 1960000;
@@ -220,6 +230,10 @@ FROM
USING (key);

-- EXPLAIN should show the differences between MATERIALIZED and NOT MATERIALIZED

\set VERBOSITY terse

SELECT public.coordinator_plan_with_subplans($Q$
EXPLAIN (COSTS OFF) WITH cte_1 AS (SELECT * FROM test_table)
SELECT
count(*)
@@ -228,6 +242,22 @@ FROM
JOIN
cte_1 as second_entry
USING (key);
$Q$);

\set VERBOSITY default

-- enable_group_by_reordering is a new GUC introduced in PG15
-- it does some optimization of the order of group by keys which results
-- in a different explain output plan between PG13/14 and PG15
-- Hence we set that GUC to off.
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
SET enable_group_by_reordering TO off;
\endif
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM SET enable_group_by_reordering TO off;$$);
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);

EXPLAIN (COSTS OFF) WITH cte_1 AS NOT MATERIALIZED (SELECT * FROM test_table)
SELECT
@@ -238,6 +268,11 @@ FROM
cte_1 as second_entry
USING (key);

\if :server_version_ge_15
RESET enable_group_by_reordering;
\endif
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM RESET enable_group_by_reordering;$$);
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);


-- ctes with volatile functions are not

@@ -415,10 +415,15 @@ SELECT operation_type, product_sku, state_code FROM record_sale ORDER BY 1,2,3;
--
--Test ALTER TRIGGER
--
-- Pre PG15, renaming the trigger on the parent table didn't rename the same trigger on
-- the children as well. Hence, let's not print the trigger names of the children
-- In PG15, rename is consistent for all partitions of the parent
-- This is tested in pg15.sql file.

CREATE VIEW sale_triggers AS
SELECT tgname, tgrelid::regclass, tgenabled
FROM pg_trigger
WHERE tgrelid::regclass::text like 'sale%'
WHERE tgrelid::regclass::text = 'sale'
ORDER BY 1, 2;

SELECT * FROM sale_triggers ORDER BY 1,2;

@@ -1,6 +1,11 @@
--
-- GRANT_ON_SCHEMA_PROPAGATION
--
-- this test has different output for PG13/14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
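
-- Illustrative sketch (not part of the original test): one way to observe the
-- ownership change described above is to query the owner of the public schema
-- directly; on PG15 this is expected to be pg_database_owner, on PG13/14 the
-- bootstrap superuser:
SELECT pg_get_userbyid(nspowner) AS public_schema_owner
FROM pg_namespace WHERE nspname = 'public';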

-- test grants are propagated when the schema is
CREATE SCHEMA dist_schema;

@@ -1,3 +1,13 @@
--
-- INSERT_SELECT_REPARTITION
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

-- tests behaviour of INSERT INTO ... SELECT with repartitioning
CREATE SCHEMA insert_select_repartition;
SET search_path TO 'insert_select_repartition';
@@ -354,7 +364,9 @@ INSERT INTO target_table
SELECT a, max(b) FROM source_table
WHERE a BETWEEN 1 AND 2 GROUP BY a;

SELECT public.coordinator_plan($Q$
EXPLAIN EXECUTE insert_plan;
$Q$);

SET client_min_messages TO DEBUG1;
EXECUTE insert_plan;
@@ -623,7 +635,9 @@ DO UPDATE SET
create table table_with_sequences (x int, y int, z bigserial);
insert into table_with_sequences values (1,1);
select create_distributed_table('table_with_sequences','x');
SELECT public.plan_without_result_lines($Q$
explain (costs off) insert into table_with_sequences select y, x from table_with_sequences;
$Q$);

-- verify that we don't report repartitioned insert/select for tables
-- with user-defined sequences.
@@ -631,7 +645,9 @@ CREATE SEQUENCE user_defined_sequence;
create table table_with_user_sequences (x int, y int, z bigint default nextval('user_defined_sequence'));
insert into table_with_user_sequences values (1,1);
select create_distributed_table('table_with_user_sequences','x');
SELECT public.plan_without_result_lines($Q$
explain (costs off) insert into table_with_user_sequences select y, x from table_with_user_sequences;
$Q$);

-- clean-up
SET client_min_messages TO WARNING;

@@ -1,3 +1,13 @@
--
-- INTERMEDIATE_RESULT_PRUNING
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

CREATE SCHEMA intermediate_result_pruning;
SET search_path TO intermediate_result_pruning;
SET citus.log_intermediate_results TO TRUE;

@@ -1,9 +1,22 @@
--
-- ISSUE_5248
--
-- This test file has an alternative output because of the change in the
-- backup modes of Postgres. Specifically, there is a renaming
-- issue: pg_stop_backup PRE PG15 vs pg_backup_stop PG15+
-- The alternative output can be deleted when we drop support for PG14
--

CREATE SCHEMA issue_5248;
SET search_path TO issue_5248;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 3013000;

SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset

create table countries(
id serial primary key
, name text
@@ -190,7 +203,11 @@ FROM (
(
SELECT utc_offset
FROM pg_catalog.pg_timezone_names limit 1 offset 4) limit 91) AS subq_3
\if :server_version_ge_15
WHERE pg_catalog.pg_backup_stop() > cast(NULL AS record) limit 100;
\else
WHERE pg_catalog.pg_stop_backup() > cast(NULL AS pg_lsn) limit 100;
\endif

SET client_min_messages TO WARNING;
DROP SCHEMA issue_5248 CASCADE;

@@ -1,3 +1,13 @@
--
-- LOCAL_SHARD_EXECUTION
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

CREATE SCHEMA local_shard_execution;
SET search_path TO local_shard_execution;

@@ -1,3 +1,13 @@
--
-- LOCAL_SHARD_EXECUTION_REPLICATED
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

CREATE SCHEMA local_shard_execution_replicated;
SET search_path TO local_shard_execution_replicated;

@@ -1,7 +1,12 @@
--
-- MULTI_DEPARSE_SHARD_QUERY
--

-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;

@ -122,9 +122,11 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
|
|||
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
||||
|
||||
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
|
||||
SELECT public.plan_normalize_memory($Q$
|
||||
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
|
||||
SELECT l_quantity, count(*) count_quantity FROM lineitem
|
||||
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
||||
$Q$);
|
||||
|
||||
-- EXPLAIN ANALYZE doesn't show worker tasks for repartition joins yet
|
||||
SET citus.shard_count TO 3;
|
||||
|
@ -142,9 +144,11 @@ END;
|
|||
DROP TABLE t1, t2;
|
||||
|
||||
-- Test query text output, with ANALYZE ON
|
||||
SELECT public.plan_normalize_memory($Q$
|
||||
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
|
||||
SELECT l_quantity, count(*) count_quantity FROM lineitem
|
||||
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
||||
$Q$);
|
||||
|
||||
-- Test query text output, with ANALYZE OFF
|
||||
EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
|
||||
|
@ -250,6 +254,20 @@ FROM
|
|||
user_id) AS subquery;
|
||||
|
||||
-- Union and left join subquery pushdown
|
||||
|
||||
-- enable_group_by_reordering is a new GUC introduced in PG15
|
||||
-- it does some optimization of the order of group by keys which results
|
||||
-- in a different explain output plan between PG13/14 and PG15
|
||||
-- Hence we set that GUC to off.
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
SET enable_group_by_reordering TO off;
|
||||
\endif
|
||||
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM SET enable_group_by_reordering TO off;$$);
|
||||
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
|
||||
|
||||
EXPLAIN (COSTS OFF)
|
||||
SELECT
|
||||
avg(array_length(events, 1)) AS event_average,
|
||||
|
@ -385,6 +403,12 @@ GROUP BY
|
|||
ORDER BY
|
||||
count_pay;
|
||||
|
||||
\if :server_version_ge_15
|
||||
RESET enable_group_by_reordering;
|
||||
\endif
|
||||
SELECT DISTINCT 1 FROM run_command_on_workers($$ALTER SYSTEM RESET enable_group_by_reordering;$$);
|
||||
SELECT run_command_on_workers($$SELECT pg_reload_conf()$$);
|
||||
|
||||
-- Lateral join subquery pushdown
|
||||
-- set subquery_pushdown due to limit in the query
|
||||
SET citus.subquery_pushdown to ON;
|
||||
|
@ -468,9 +492,11 @@ EXPLAIN (COSTS FALSE)
|
|||
DELETE FROM lineitem_hash_part;
|
||||
|
||||
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
|
||||
SELECT public.plan_normalize_memory($Q$
|
||||
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
|
||||
SELECT l_quantity, count(*) count_quantity FROM lineitem
|
||||
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
||||
$Q$);
|
||||
|
||||
SET citus.explain_all_tasks TO off;
|
||||
|
||||
|
|
|
@@ -294,6 +294,14 @@ SELECT * FROM multi_extension.print_extension_changes();

-- recreate public schema, and recreate citus_tables in the public schema by default
CREATE SCHEMA public;
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
ALTER SCHEMA public OWNER TO pg_database_owner;
\endif
GRANT ALL ON SCHEMA public TO public;
ALTER EXTENSION citus UPDATE TO '9.5-1';
ALTER EXTENSION citus UPDATE TO '10.0-4';

@@ -1,6 +1,12 @@
--
-- MULTI_INSERT_SELECT
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

SET citus.next_shard_id TO 13300000;
SET citus.next_placement_id TO 13300000;

@@ -1,3 +1,13 @@
--
-- MULTI_INSERT_SELECT_CONFLICT
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

CREATE SCHEMA on_conflict;
SET search_path TO on_conflict, public;
SET citus.next_shard_id TO 1900000;

@@ -1,6 +1,11 @@
--
-- MULTI_METADATA_SYNC
--
-- this test has different output for PG13/14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

-- Tests for metadata snapshot functions, metadata syncing functions and propagation of
-- metadata changes to MX tables.

@@ -1,4 +1,14 @@
--
-- MULTI_MX_INSERT_SELECT_REPARTITION
--
-- Test behaviour of repartitioned INSERT ... SELECT in MX setup
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

CREATE SCHEMA multi_mx_insert_select_repartition;
SET search_path TO multi_mx_insert_select_repartition;

@ -113,11 +113,13 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggreate is disabled. We expect to see sort+unique
|
||||
-- instead of aggregate plan node to handle distinct.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT count(*)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1;
|
||||
$Q$);
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
|
||||
|
@ -140,12 +142,14 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggreate is disabled. Similar to the explain of
|
||||
-- the query above.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT l_suppkey, count(*)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1
|
||||
LIMIT 10;
|
||||
$Q$);
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
|
||||
|
@ -169,12 +173,14 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggreate is disabled. This explain errors out due
|
||||
-- to a bug right now, expectation must be corrected after fixing it.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT l_suppkey, avg(l_partkey)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1,2
|
||||
LIMIT 10;
|
||||
$Q$);
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
|
||||
|
@ -197,12 +203,14 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggreate is disabled. We expect to see sort+unique to
|
||||
-- handle distinct on.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT ON (l_suppkey) avg(l_partkey)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY l_suppkey,1
|
||||
LIMIT 10;
|
||||
$Q$);
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
|
||||
|
@ -224,12 +232,14 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggreate is disabled. This explain errors out due
|
||||
-- to a bug right now, expectation must be corrected after fixing it.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT avg(ceil(l_partkey / 2))
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1
|
||||
LIMIT 10;
|
||||
$Q$);
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
|
||||
|
@ -251,12 +261,14 @@ EXPLAIN (COSTS FALSE)
|
|||
-- check the plan if the hash aggreate is disabled. This explain errors out due
|
||||
-- to a bug right now, expectation must be corrected after fixing it.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT sum(l_suppkey) + count(l_partkey) AS dis
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey, l_linenumber
|
||||
ORDER BY 1
|
||||
LIMIT 10;
|
||||
$Q$);
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
|
||||
|
@ -270,22 +282,26 @@ SELECT DISTINCT *
|
|||
|
||||
-- explain the query to see actual plan. We expect to see only one aggregation
|
||||
-- node since group by columns guarantees the uniqueness.
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT *
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16
|
||||
ORDER BY 1,2
|
||||
LIMIT 10;
|
||||
$Q$);
|
||||
|
||||
-- check the plan if the hash aggreate is disabled. We expect to see only one
|
||||
-- aggregation node since group by columns guarantees the uniqueness.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT *
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16
|
||||
ORDER BY 1,2
|
||||
LIMIT 10;
|
||||
$Q$);
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
|
||||
|
@ -329,30 +345,36 @@ EXPLAIN (COSTS FALSE)
|
|||
|
||||
-- check the plan if the hash aggreate is disabled
|
||||
SET enable_hashagg TO off;
|
||||
SELECT public.plan_without_result_lines($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT ceil(count(case when l_partkey > 100000 THEN 1 ELSE 0 END) / 2) AS count
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_suppkey
|
||||
ORDER BY 1;
|
||||
$Q$);
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
|
||||
-- explain the query to see actual plan with array_agg aggregation.
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT array_agg(l_linenumber), array_length(array_agg(l_linenumber), 1)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_orderkey
|
||||
ORDER BY 2
|
||||
LIMIT 15;
|
||||
$Q$);
|
||||
|
||||
-- check the plan if the hash aggreate is disabled.
|
||||
SET enable_hashagg TO off;
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT DISTINCT array_agg(l_linenumber), array_length(array_agg(l_linenumber), 1)
|
||||
FROM lineitem_hash_part
|
||||
GROUP BY l_orderkey
|
||||
ORDER BY 2
|
||||
LIMIT 15;
|
||||
$Q$);
|
||||
|
||||
SET enable_hashagg TO on;
|
||||
|
||||
|
|
|
@@ -20,10 +20,15 @@ END;
$$LANGUAGE plpgsql;

-- Create a function to ignore worker plans in explain output
-- Also remove extra "-> Result" lines for PG15 support
CREATE OR REPLACE FUNCTION coordinator_plan(explain_command text, out query_plan text)
RETURNS SETOF TEXT AS $$
BEGIN
FOR query_plan IN execute explain_command LOOP
IF (query_plan LIKE '%-> Result%' OR query_plan = 'Result')
THEN
CONTINUE;
END IF;
RETURN next;
IF query_plan LIKE '%Task Count:%'
THEN
@@ -33,6 +38,69 @@ BEGIN
RETURN;
END; $$ language plpgsql;

-- Create a function to ignore worker plans in explain output
-- It also shows task count for plan and subplans
-- Also remove extra "-> Result" lines for PG15 support
CREATE OR REPLACE FUNCTION coordinator_plan_with_subplans(explain_command text, out query_plan text)
RETURNS SETOF TEXT AS $$
DECLARE
task_count_line_reached boolean := false;
BEGIN
FOR query_plan IN execute explain_command LOOP
IF (query_plan LIKE '%-> Result%' OR query_plan = 'Result') THEN
CONTINUE;
END IF;
IF NOT task_count_line_reached THEN
RETURN next;
END IF;
IF query_plan LIKE '%Task Count:%' THEN
IF NOT task_count_line_reached THEN
SELECT true INTO task_count_line_reached;
ELSE
RETURN next;
END IF;
END IF;
END LOOP;
RETURN;
END; $$ language plpgsql;
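
-- Illustrative usage sketch (not part of the original diff); "dist_tab" is a
-- hypothetical distributed table, used only to show the calling convention.
-- The helper runs the dollar-quoted EXPLAIN, prints the coordinator plan up to
-- the first "Task Count:" line, and afterwards prints only the remaining
-- "Task Count:" lines (one per subplan).
SELECT public.coordinator_plan_with_subplans($Q$
EXPLAIN (COSTS OFF) WITH cte AS (SELECT a FROM dist_tab)
SELECT count(*) FROM cte JOIN dist_tab USING (a);
$Q$);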

-- Create a function to ignore "-> Result" lines for PG15 support
-- In PG15 there are some extra "-> Result" lines
CREATE OR REPLACE FUNCTION plan_without_result_lines(explain_command text, out query_plan text)
RETURNS SETOF TEXT AS $$
BEGIN
FOR query_plan IN execute explain_command LOOP
IF (query_plan LIKE '%-> Result%' OR query_plan = 'Result') THEN
CONTINUE;
END IF;
RETURN next;
END LOOP;
RETURN;
END; $$ language plpgsql;

-- Create a function to normalize Memory Usage, Buckets, Batches
CREATE OR REPLACE FUNCTION plan_normalize_memory(explain_command text, out query_plan text)
RETURNS SETOF TEXT AS $$
BEGIN
FOR query_plan IN execute explain_command LOOP
query_plan := regexp_replace(query_plan, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');
RETURN NEXT;
END LOOP;
END; $$ language plpgsql;
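
-- Illustrative sketch (not part of the original diff): the regexp above rewrites
-- run-dependent memory figures so that EXPLAIN ANALYZE output stays stable, e.g.
-- "Buckets: 1024  Batches: 1  Memory Usage: 40kB" becomes
-- "Buckets: xxx  Batches: xxx  Memory Usage: xxx".
SELECT public.plan_normalize_memory($Q$
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT 1;
$Q$);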

-- Create a function to remove arrows from the explain plan
CREATE OR REPLACE FUNCTION plan_without_arrows(explain_command text, out query_plan text)
RETURNS SETOF TEXT AS $$
BEGIN
FOR query_plan IN execute explain_command LOOP
IF (query_plan LIKE '%-> Result%' OR query_plan = 'Result') THEN
CONTINUE;
END IF;
query_plan := regexp_replace(query_plan, '( )*-> (.*)', '\2', 'g');
RETURN NEXT;
END LOOP;
END; $$ language plpgsql;
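
-- Illustrative sketch (not part of the original diff): the regexp above drops the
-- "->" tree connectors together with their leading indentation, so a line such as
-- "        ->  Seq Scan on pg_class" is reduced to the bare node line, which keeps
-- expected output stable when PG15 changes plan indentation or adds Result nodes.
SELECT plan_without_arrows($Q$
EXPLAIN (COSTS OFF) SELECT count(*) FROM pg_class;
$Q$);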

-- helper function that returns true if output of given explain has "is not null" (case in-sensitive)
CREATE OR REPLACE FUNCTION explain_has_is_not_null(explain_command text)
RETURNS BOOLEAN AS $$

@@ -374,6 +374,7 @@ VACUUM ANALYZE users_table;
-- explain tests
EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1;

SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS FALSE) SELECT *
FROM (
(SELECT user_id FROM recent_users)
@@ -381,6 +382,7 @@ EXPLAIN (COSTS FALSE) SELECT *
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 4 AND user_id > 1
ORDER BY user_id;
$Q$);

EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10;
SET citus.subquery_pushdown to ON;

@@ -1,3 +1,13 @@
--
-- MX_COORDINATOR_SHOULDHAVESHARDS
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

CREATE SCHEMA mx_coordinator_shouldhaveshards;
SET search_path TO mx_coordinator_shouldhaveshards;

@@ -348,8 +348,12 @@ ROLLBACK;

BEGIN;
-- drop some of the columns not having "generated always as stored" expressions
-- this would drop generated columns too
ALTER TABLE generated_stored_ref DROP COLUMN col_1;
-- PRE PG15, this would drop generated columns too
-- In PG15, CASCADE option must be specified
-- Relevant PG Commit: cb02fcb4c95bae08adaca1202c2081cfc81a28b5
SET client_min_messages TO WARNING;
ALTER TABLE generated_stored_ref DROP COLUMN col_1 CASCADE;
RESET client_min_messages;
ALTER TABLE generated_stored_ref DROP COLUMN col_4;

-- show that undistribute_table works fine

@ -92,9 +92,9 @@ SET citus.shard_replication_factor TO 1;
|
|||
CREATE TABLE col_compression (a TEXT COMPRESSION pglz, b TEXT);
|
||||
SELECT create_distributed_table('col_compression', 'a', shard_count:=4);
|
||||
|
||||
SELECT attname || ' ' || attcompression AS column_compression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'col\_compression%' AND attnum > 0 ORDER BY 1;
|
||||
SELECT attname || ' ' || attcompression::text AS column_compression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'col\_compression%' AND attnum > 0 ORDER BY 1;
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
|
||||
-- test column compression propagation in rebalance
|
||||
|
@ -103,20 +103,20 @@ SELECT citus_move_shard_placement((SELECT * FROM moving_shard), :'public_worker_
|
|||
SELECT rebalance_table_shards('col_compression', rebalance_strategy := 'by_shard_count', shard_transfer_mode := 'block_writes');
|
||||
CALL citus_cleanup_orphaned_shards();
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
|
||||
-- test propagation of ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION ..
|
||||
ALTER TABLE col_compression ALTER COLUMN b SET COMPRESSION pglz;
|
||||
ALTER TABLE col_compression ALTER COLUMN a SET COMPRESSION default;
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
|
||||
-- test propagation of ALTER TABLE .. ADD COLUMN .. COMPRESSION ..
|
||||
ALTER TABLE col_compression ADD COLUMN c TEXT COMPRESSION pglz;
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
|
||||
-- test attaching to a partitioned table with column compression
|
||||
|
@ -126,7 +126,7 @@ SELECT create_distributed_table('col_comp_par', 'a');
|
|||
CREATE TABLE col_comp_par_1 PARTITION OF col_comp_par FOR VALUES FROM ('abc') TO ('def');
|
||||
|
||||
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
|
||||
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_comp\_par\_1\_%' AND attnum > 0 ORDER BY 1
|
||||
SELECT attname || ' ' || attcompression::text FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_comp\_par\_1\_%' AND attnum > 0 ORDER BY 1
|
||||
)$$);
|
||||
|
||||
RESET citus.multi_shard_modify_mode;
|
||||
|
|
|
@@ -0,0 +1,157 @@
--
-- PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
\endif

CREATE SCHEMA pg15;
SET search_path TO pg15;
SET citus.next_shard_id TO 960000;
SET citus.shard_count TO 4;

--
-- In PG15, there is an added option to use ICU as global locale provider.
-- pg_collation has three locale-related fields: collcollate and collctype,
-- which are libc-related fields, and a new one colliculocale, which is the
-- ICU-related field. Only the libc-related fields or the ICU-related field
-- is set, never both.
-- Relevant PG commits:
-- f2553d43060edb210b36c63187d52a632448e1d2
-- 54637508f87bd5f07fb9406bac6b08240283be3b
--

-- fail, needs "locale"
CREATE COLLATION german_phonebook_test (provider = icu, lc_collate = 'de-u-co-phonebk');

-- fail, needs "locale"
CREATE COLLATION german_phonebook_test (provider = icu, lc_collate = 'de-u-co-phonebk', lc_ctype = 'de-u-co-phonebk');

-- works
CREATE COLLATION german_phonebook_test (provider = icu, locale = 'de-u-co-phonebk');

-- with icu provider, colliculocale will be set, collcollate and collctype will be null
SELECT result FROM run_command_on_all_nodes('
SELECT collcollate FROM pg_collation WHERE collname = ''german_phonebook_test'';
');
SELECT result FROM run_command_on_all_nodes('
SELECT collctype FROM pg_collation WHERE collname = ''german_phonebook_test'';
');
SELECT result FROM run_command_on_all_nodes('
SELECT colliculocale FROM pg_collation WHERE collname = ''german_phonebook_test'';
');
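
-- Illustrative sketch (not part of the original test): the same three fields can
-- also be inspected locally in a single query; with the icu provider only
-- colliculocale is expected to be non-null:
SELECT collprovider, collcollate, collctype, colliculocale
FROM pg_collation WHERE collname = 'german_phonebook_test';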

-- with non-icu provider, colliculocale will be null, collcollate and collctype will be set
CREATE COLLATION default_provider (provider = libc, lc_collate = "POSIX", lc_ctype = "POSIX");

SELECT result FROM run_command_on_all_nodes('
SELECT collcollate FROM pg_collation WHERE collname = ''default_provider'';
');
SELECT result FROM run_command_on_all_nodes('
SELECT collctype FROM pg_collation WHERE collname = ''default_provider'';
');
SELECT result FROM run_command_on_all_nodes('
SELECT colliculocale FROM pg_collation WHERE collname = ''default_provider'';
');

--
-- In PG15, renaming a trigger on a partitioned table recurses to renaming the
-- triggers on its partitions as well.
-- Here we test that distributed triggers behave the same way.
-- Relevant PG commit:
-- 80ba4bb383538a2ee846fece6a7b8da9518b6866
--

SET citus.enable_unsafe_triggers TO true;

CREATE TABLE sale(
sale_date date not null,
state_code text,
product_sku text,
units integer)
PARTITION BY list (state_code);

ALTER TABLE sale ADD CONSTRAINT sale_pk PRIMARY KEY (state_code, sale_date);

CREATE TABLE sale_newyork PARTITION OF sale FOR VALUES IN ('NY');
CREATE TABLE sale_california PARTITION OF sale FOR VALUES IN ('CA');

CREATE TABLE record_sale(
operation_type text not null,
product_sku text,
state_code text,
units integer,
PRIMARY KEY(state_code, product_sku, operation_type, units));

SELECT create_distributed_table('sale', 'state_code');
SELECT create_distributed_table('record_sale', 'state_code', colocate_with := 'sale');

CREATE OR REPLACE FUNCTION record_sale()
RETURNS trigger
AS $$
BEGIN
INSERT INTO pg15.record_sale(operation_type, product_sku, state_code, units)
VALUES (TG_OP, NEW.product_sku, NEW.state_code, NEW.units);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER record_sale_trigger
AFTER INSERT OR UPDATE OR DELETE ON sale
FOR EACH ROW EXECUTE FUNCTION pg15.record_sale();

CREATE VIEW sale_triggers AS
SELECT tgname, tgrelid::regclass, tgenabled
FROM pg_trigger
WHERE tgrelid::regclass::text like 'sale%'
ORDER BY 1, 2;

SELECT * FROM sale_triggers ORDER BY 1, 2;
ALTER TRIGGER "record_sale_trigger" ON "pg15"."sale" RENAME TO "new_record_sale_trigger";
SELECT * FROM sale_triggers ORDER BY 1, 2;
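
-- Illustrative check (not part of the original test): in PG15 the rename above is
-- expected to have cascaded to the cloned triggers on the partitions as well,
-- which can be observed directly from pg_trigger:
SELECT tgname, tgrelid::regclass
FROM pg_trigger
WHERE tgrelid IN ('sale_newyork'::regclass, 'sale_california'::regclass)
AND NOT tgisinternal
ORDER BY 1, 2;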

--
-- In PG15, For GENERATED columns, all dependencies of the generation
-- expression are recorded as NORMAL dependencies of the column itself.
-- This requires CASCADE to drop generated cols with the original col.
-- Test this behavior in distributed table, specifically with
-- undistribute_table within a transaction.
-- Relevant PG Commit: cb02fcb4c95bae08adaca1202c2081cfc81a28b5
--

CREATE TABLE generated_stored_ref (
col_1 int,
col_2 int,
col_3 int generated always as (col_1+col_2) stored,
col_4 int,
col_5 int generated always as (col_4*2-col_1) stored
);

SELECT create_reference_table ('generated_stored_ref');

-- populate the table
INSERT INTO generated_stored_ref (col_1, col_4) VALUES (1,2), (11,12);
INSERT INTO generated_stored_ref (col_1, col_2, col_4) VALUES (100,101,102), (200,201,202);
SELECT * FROM generated_stored_ref ORDER BY 1,2,3,4,5;

-- fails, CASCADE must be specified
-- will test CASCADE inside the transaction
ALTER TABLE generated_stored_ref DROP COLUMN col_1;

BEGIN;
-- drops col_1, col_3, col_5
ALTER TABLE generated_stored_ref DROP COLUMN col_1 CASCADE;
ALTER TABLE generated_stored_ref DROP COLUMN col_4;

-- show that undistribute_table works fine
SELECT undistribute_table('generated_stored_ref');
INSERT INTO generated_stored_ref VALUES (5);
SELECT * FROM generated_stored_REF ORDER BY 1;
ROLLBACK;

-- Clean up
DROP SCHEMA pg15 CASCADE;

@@ -1,3 +1,13 @@
--
-- SINGLE_NODE
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;

CREATE SCHEMA single_node;
SET search_path TO single_node;
SET citus.shard_count TO 4;