mirror of https://github.com/citusdata/citus.git
Merge pull request #2778 from citusdata/2400_modifying_ctes
Support CTEs in router planner for modification queries
commit 6ae9158216

@@ -37,6 +37,7 @@ lib*.pc
/autom4te.cache
/Makefile.global
/src/Makefile.custom
/compile_commands.json

# temporary files vim creates
*.swp

@@ -272,7 +272,7 @@ AcquireExecutorShardLock(Task *task, CmdType commandType)
* must be conflict with each other modify command. By getting ExlcusiveLock
* we guarantee that. Note that, getting ExlusiveLock does not mimic the
* behaviour of Postgres exactly. Getting row lock with FOR NO KEY UPDATE and
* FOR KEY SHARE do not conflicts in Postgres, yet they block each other in
* FOR KEY SHARE do not conflict in Postgres, yet they block each other in
* our implementation. Since FOR SHARE and FOR KEY SHARE does not conflict
* with each other but conflicts with modify commands, we get ShareLock for
* them.
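
The comment above is easiest to see with two concurrent sessions; a minimal sketch, assuming a distributed table named items with an id column (both names hypothetical):

-- session 1
BEGIN;
SELECT * FROM items WHERE id = 1 FOR NO KEY UPDATE;
-- session 2: these two row locks do not conflict in plain Postgres, but under
-- Citus' ExclusiveLock-based shard locking the second session blocks
SELECT * FROM items WHERE id = 1 FOR KEY SHARE;
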

@@ -641,7 +641,7 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi
}
else
{
RaiseDeferredError(distributedPlan->planningError, DEBUG1);
RaiseDeferredError(distributedPlan->planningError, DEBUG2);
}
}
else
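
With this change the planner's deferred errors surface at DEBUG2 instead of DEBUG1; a sketch of viewing them from psql (assuming the distributed_table used in the regression tests below):

SET client_min_messages TO debug2;
SELECT count(*) FROM distributed_table;
-- DEBUG:  Router planner cannot handle multi-shard select queries
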

@@ -655,21 +655,18 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi

distributedPlan = CreateRouterPlan(originalQuery, query,
plannerRestrictionContext);
if (distributedPlan != NULL)
if (distributedPlan->planningError == NULL)
{
if (distributedPlan->planningError == NULL)
{
/* successfully created a router plan */
return distributedPlan;
}
else
{
/*
* For debugging it's useful to display why query was not
* router plannable.
*/
RaiseDeferredError(distributedPlan->planningError, DEBUG1);
}
/* successfully created a router plan */
return distributedPlan;
}
else
{
/*
* For debugging it's useful to display why query was not
* router plannable.
*/
RaiseDeferredError(distributedPlan->planningError, DEBUG2);
}
}

@@ -108,10 +108,11 @@ bool EnableRouterExecution = true;


/* planner functions forward declarations */
static DistributedPlan * CreateSingleTaskRouterPlan(Query *originalQuery,
Query *query,
PlannerRestrictionContext *
plannerRestrictionContext);
static void CreateSingleTaskRouterPlan(DistributedPlan *distributedPlan,
Query *originalQuery,
Query *query,
PlannerRestrictionContext *
plannerRestrictionContext);
static bool IsTidColumn(Node *node);
static DeferredErrorMessage * MultiShardModifyQuerySupported(Query *originalQuery,
PlannerRestrictionContext *

@@ -137,7 +138,7 @@ static void NormalizeMultiRowInsertTargetList(Query *query);
static List * BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError);
static List * GroupInsertValuesByShardId(List *insertValuesList);
static List * ExtractInsertValuesList(Query *query, Var *partitionColumn);
static bool MultiRouterPlannableQuery(Query *query);
static DeferredErrorMessage * MultiRouterPlannableQuery(Query *query);
static DeferredErrorMessage * ErrorIfQueryHasModifyingCTE(Query *queryTree);
static RangeTblEntry * GetUpdateOrDeleteRTE(Query *query);
static bool SelectsFromDistributedTable(List *rangeTableList, Query *query);

@@ -163,24 +164,23 @@ static void ReorderTaskPlacementsByTaskAssignmentPolicy(Job *job,

/*
* CreateRouterPlan attempts to create a router executor plan for the given
* SELECT statement. If planning fails either NULL is returned, or
* ->planningError is set to a description of the failure.
* SELECT statement. ->planningError is set if planning fails.
*/
DistributedPlan *
CreateRouterPlan(Query *originalQuery, Query *query,
PlannerRestrictionContext *plannerRestrictionContext)
{
if (MultiRouterPlannableQuery(query))
DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);

distributedPlan->planningError = MultiRouterPlannableQuery(query);

if (distributedPlan->planningError == NULL)
{
return CreateSingleTaskRouterPlan(originalQuery, query,
plannerRestrictionContext);
CreateSingleTaskRouterPlan(distributedPlan, originalQuery, query,
plannerRestrictionContext);
}

/*
* TODO: Instead have MultiRouterPlannableQuery set an error describing
* why router cannot support the query.
*/
return NULL;
return distributedPlan;
}
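
After this refactor CreateRouterPlan always returns a DistributedPlan and reports failure through ->planningError rather than returning NULL. On success the planner still announces itself at DEBUG2, as in the expected test output further below (sketch; the tenant_id filter keeps the query single-shard):

SET client_min_messages TO debug2;
SELECT * FROM distributed_table WHERE tenant_id = '1';
-- DEBUG:  Creating router plan
-- DEBUG:  Plan is router executable
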

@@ -245,30 +245,23 @@ CreateModifyPlan(Query *originalQuery, Query *query,
* are router plannable by default. If query is not router plannable then either NULL is
* returned, or the returned plan has planningError set to a description of the problem.
*/
static DistributedPlan *
CreateSingleTaskRouterPlan(Query *originalQuery, Query *query,
static void
CreateSingleTaskRouterPlan(DistributedPlan *distributedPlan, Query *originalQuery,
Query *query,
PlannerRestrictionContext *plannerRestrictionContext)
{
Job *job = NULL;
DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);

distributedPlan->operation = query->commandType;

/* FIXME: this should probably rather be inlined into CreateRouterPlan */
distributedPlan->planningError = ErrorIfQueryHasModifyingCTE(query);
if (distributedPlan->planningError)
{
return distributedPlan;
}

/* we cannot have multi shard update/delete query via this code path */
job = RouterJob(originalQuery, plannerRestrictionContext,
&distributedPlan->planningError);

if (distributedPlan->planningError)
if (distributedPlan->planningError != NULL)
{
/* query cannot be handled by this planner */
return NULL;
return;
}

ereport(DEBUG2, (errmsg("Creating router plan")));

@@ -277,8 +270,6 @@ CreateSingleTaskRouterPlan(Query *originalQuery, Query *query,
distributedPlan->masterQuery = NULL;
distributedPlan->routerExecutable = true;
distributedPlan->hasReturning = false;

return distributedPlan;
}

@@ -603,13 +594,46 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
}
}

/* reject queries which include CommonTableExpr */
/* reject queries which include CommonTableExpr which aren't routable */
if (queryTree->cteList != NIL)
{
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"common table expressions are not supported in distributed "
"modifications",
NULL, NULL);
ListCell *cteCell = NULL;

foreach(cteCell, queryTree->cteList)
{
CommonTableExpr *cte = (CommonTableExpr *) lfirst(cteCell);
Query *cteQuery = (Query *) cte->ctequery;
DeferredErrorMessage *cteError = NULL;

if (cteQuery->commandType != CMD_SELECT)
{
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"Router planner doesn't support non-select common table expressions.",
NULL, NULL);
}

if (cteQuery->hasForUpdate)
{
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"Router planner doesn't support SELECT FOR UPDATE"
" in common table expressions.",
NULL, NULL);
}

if (FindNodeCheck((Node *) cteQuery, CitusIsVolatileFunction))
{
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"Router planner doesn't support VOLATILE functions"
" in common table expressions.",
NULL, NULL);
}

cteError = MultiRouterPlannableQuery(cteQuery);
if (cteError)
{
return cteError;
}
}
}

/* extract range table entries */
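
The loop above replaces the blanket rejection: a modification whose CTEs are plain router-plannable SELECTs (no writes, no FOR UPDATE, no volatile functions) can now be router-planned as a whole. A sketch against the fast-path test table that appears later in this diff:

WITH t2 AS (
    SELECT key, value_1, value_2 FROM modify_fast_path WHERE key = 1
)
UPDATE modify_fast_path
SET value_2 = t2.value_2
FROM t2
WHERE modify_fast_path.key = 1;
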

@@ -666,8 +690,6 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
* Error out for rangeTableEntries that we do not support.
* We do not explicitly specify "in FROM clause" in the error detail
* for the features that we do not support at all (SUBQUERY, JOIN).
* We do not need to check for RTE_CTE because all common table expressions
* are rejected above with queryTree->cteList check.
*/
if (rangeTableEntry->rtekind == RTE_SUBQUERY)
{

@@ -696,6 +718,11 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
rangeTableEntryErrorDetail = "Functions must not appear in the FROM"
" clause of a distributed modifications.";
}
else if (rangeTableEntry->rtekind == RTE_CTE)
{
rangeTableEntryErrorDetail = "Common table expressions are not supported"
" in distributed modifications.";
}
else
{
rangeTableEntryErrorDetail = "Unrecognized range table entry.";

@@ -1591,7 +1618,7 @@ RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionCon
bool isMultiShardModifyQuery = false;
Const *partitionKeyValue = NULL;

/* router planner should create task even if it deosn't hit a shard at all */
/* router planner should create task even if it doesn't hit a shard at all */
replacePrunedQueryWithDummy = true;

/* check if this query requires master evaluation */

@@ -1961,9 +1988,9 @@ PlanRouterQuery(Query *originalQuery,
*/
if (commandType == CMD_SELECT)
{
planningError = DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
NULL, NULL, NULL);
return planningError;
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"Router planner cannot handle multi-shard select queries",
NULL, NULL);
}

Assert(UpdateOrDeleteQuery(originalQuery));

@@ -2079,7 +2106,6 @@ PlanRouterQuery(Query *originalQuery,
return planningError;
}


/*
* If this is an UPDATE or DELETE query which requires master evaluation,
* don't try update shard names, and postpone that to execution phase.
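
The Assert documents that the only multi-shard statements reaching this point are UPDATE/DELETE; a multi-shard SELECT now returns the descriptive deferred error added above instead of an empty one. A sketch of both shapes (table hypothetical):

UPDATE distributed_table SET dept = dept + 1;   -- multi-shard modify: handled here
SELECT count(*) FROM distributed_table;         -- multi-shard select: deferred error
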

@@ -2941,30 +2967,26 @@ ExtractInsertPartitionKeyValue(Query *query)


/*
* MultiRouterPlannableQuery returns true if given query can be router plannable.
* MultiRouterPlannableQuery checks if given select query is router plannable,
* setting distributedPlan->planningError if not.
* The query is router plannable if it is a modify query, or if its is a select
* query issued on a hash partitioned distributed table. Router plannable checks
* for select queries can be turned off by setting citus.enable_router_execution
* flag to false.
*/
static bool
static DeferredErrorMessage *
MultiRouterPlannableQuery(Query *query)
{
CmdType commandType = query->commandType;
List *rangeTableRelationList = NIL;
ListCell *rangeTableRelationCell = NULL;

if (commandType == CMD_INSERT || commandType == CMD_UPDATE ||
commandType == CMD_DELETE)
{
return true;
}

Assert(commandType == CMD_SELECT);
Assert(query->commandType == CMD_SELECT);

if (!EnableRouterExecution)
{
return false;
return DeferredError(ERRCODE_SUCCESSFUL_COMPLETION,
"Router planner not enabled.",
NULL, NULL);
}

ExtractRangeTableRelationWalker((Node *) query, &rangeTableRelationList);

@@ -2980,14 +3002,20 @@ MultiRouterPlannableQuery(Query *query)
if (!IsDistributedTable(distributedTableId))
{
/* local tables cannot be read from workers */
return false;
return DeferredError(
ERRCODE_FEATURE_NOT_SUPPORTED,
"Local tables cannot be used in distributed queries.",
NULL, NULL);
}

partitionMethod = PartitionMethod(distributedTableId);
if (!(partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod ==
DISTRIBUTE_BY_NONE || partitionMethod == DISTRIBUTE_BY_RANGE))
{
return false;
return DeferredError(
ERRCODE_FEATURE_NOT_SUPPORTED,
"Router planner does not support append-partitioned tables.",
NULL, NULL);
}

/*

@@ -3002,13 +3030,16 @@ MultiRouterPlannableQuery(Query *query)

if (tableReplicationFactor > 1 && partitionMethod != DISTRIBUTE_BY_NONE)
{
return false;
return DeferredError(
ERRCODE_FEATURE_NOT_SUPPORTED,
"SELECT FOR UPDATE with table replication factor > 1 not supported for non-reference tables.",
NULL, NULL);
}
}
}
}

return true;
return ErrorIfQueryHasModifyingCTE(query);
}
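
The boolean results above become descriptive deferred errors; for example, disabling router execution now records "Router planner not enabled." rather than silently returning false (sketch):

SET citus.enable_router_execution TO false;
SET client_min_messages TO debug2;
SELECT * FROM distributed_table WHERE tenant_id = '1';
-- DEBUG:  Router planner not enabled.
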

@@ -31,6 +31,8 @@ DeferredErrorInternal(int code, const char *message, const char *detail, const c
{
DeferredErrorMessage *error = CitusMakeNode(DeferredErrorMessage);

Assert(message != NULL);

error->code = code;
error->message = message;
error->detail = detail;

@@ -234,7 +234,7 @@ PG_FUNCTION_INFO_V1(poolinfo_valid);

/*
* EnsureModificationsCanRun checks if the current node is in recovery mode or
* citus.use_secondary_nodes is 'alwaus'. If either is true the function errors out.
* citus.use_secondary_nodes is 'always'. If either is true the function errors out.
*/
void
EnsureModificationsCanRun(void)
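
A sketch of the condition the corrected comment describes, assuming citus.use_secondary_nodes can be set in-session (the exact error text is not shown in this diff):

SET citus.use_secondary_nodes TO 'always';
INSERT INTO distributed_table VALUES ('1', 1);  -- expected to error out
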

@@ -52,7 +52,6 @@ WHERE
foo.avg_tenant_id::int::text = reference_table.id
RETURNING
reference_table.name;
DEBUG: only reference tables may be queried when targeting a reference table with multi shard UPDATE/DELETE queries with multiple tables
DEBUG: generating subplan 4_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM recursive_dml_queries.second_distributed_table
DEBUG: Plan 4 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id) RETURNING reference_table.name
name

@@ -86,7 +85,6 @@ WHERE
AND second_distributed_table.dept IN (2)
RETURNING
second_distributed_table.tenant_id, second_distributed_table.dept;
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC
DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2)) RETURNING second_distributed_table.tenant_id, second_distributed_table.dept
tenant_id | dept

@@ -137,7 +135,6 @@ FROM
WHERE
foo.tenant_id != second_distributed_table.tenant_id
AND second_distributed_table.dept IN (3);
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
DEBUG: generating subplan 8_1 for subquery SELECT second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE ((distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5])))
DEBUG: generating subplan 8_2 for subquery SELECT DISTINCT foo_inner_1.tenant_id FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE ((distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4])))) foo_inner_1, (SELECT intermediate_result.tenant_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) foo_inner_2 WHERE (foo_inner_1.tenant_id OPERATOR(pg_catalog.<>) foo_inner_2.tenant_id)
DEBUG: Plan 8 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.second_distributed_table SET dept = ((foo.tenant_id)::integer OPERATOR(pg_catalog./) 4) FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 3))

@@ -157,7 +154,6 @@ WHERE
foo.avg_tenant_id::int::text = distributed_table.tenant_id
RETURNING
distributed_table.*;
DEBUG: relation local_table is not distributed
DEBUG: generating subplan 11_1 for subquery SELECT avg((id)::integer) AS avg_tenant_id FROM recursive_dml_queries.local_table
DEBUG: Plan 11 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info
tenant_id | dept | info

@@ -181,7 +177,6 @@ WHERE
foo.avg_tenant_id::int::text = distributed_table.tenant_id
RETURNING
distributed_table.*;
DEBUG: relation tenant_ids is not distributed
DEBUG: generating subplan 12_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM (SELECT distributed_table.tenant_id, reference_table.name FROM recursive_dml_queries.distributed_table, recursive_dml_queries.reference_table WHERE ((distributed_table.dept)::text OPERATOR(pg_catalog.=) reference_table.id) ORDER BY reference_table.name DESC, distributed_table.tenant_id DESC) tenant_ids
DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info
tenant_id | dept | info

@@ -223,7 +218,6 @@ FROM
ON (foo_inner_2.tenant_id != foo_inner_1.tenant_id)
) as foo
RETURNING *;
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
ERROR: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
-- again a corrolated subquery
-- this time distribution key eq. exists

@@ -254,25 +248,18 @@ FROM
) as baz
) as foo WHERE second_distributed_table.tenant_id = foo.tenant_id
RETURNING *;
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
ERROR: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
-- we don't support subquerues/CTEs inside VALUES
-- we don't support subqueries/CTEs inside VALUES
INSERT INTO
second_distributed_table (tenant_id, dept)
VALUES ('3', (WITH vals AS (SELECT 3) select * from vals));
DEBUG: subqueries are not supported within INSERT queries
HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
DEBUG: generating subplan 18_1 for CTE vals: SELECT 3
DEBUG: Plan 18 query after replacing subqueries and CTEs: INSERT INTO recursive_dml_queries.second_distributed_table (tenant_id, dept) VALUES ('3'::text, (SELECT vals."?column?" FROM (SELECT intermediate_result."?column?" FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer)) vals))
DEBUG: subqueries are not supported within INSERT queries
HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
ERROR: subqueries are not supported within INSERT queries
HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
INSERT INTO
second_distributed_table (tenant_id, dept)
VALUES ('3', (SELECT 3));
DEBUG: subqueries are not supported within INSERT queries
HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
ERROR: subqueries are not supported within INSERT queries
HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
-- DML with an unreferenced SELECT CTE

@@ -291,9 +278,7 @@ UPDATE distributed_table
SET dept = 5
FROM cte_1
WHERE distributed_table.tenant_id < cte_1.tenant_id;
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 20_1 for CTE cte_1: WITH cte_2 AS (SELECT second_distributed_table.tenant_id AS cte2_id FROM recursive_dml_queries.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.>=) 2)) UPDATE recursive_dml_queries.distributed_table SET dept = 10 RETURNING tenant_id, dept, info
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: Plan 20 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = 5 FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept, intermediate_result.info FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer, info jsonb)) cte_1 WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.<) cte_1.tenant_id)
WITH cte_1 AS (
WITH cte_2 AS (

@@ -310,9 +295,7 @@ UPDATE distributed_table
SET dept = 5
FROM cte_1
WHERE distributed_table.tenant_id < cte_1.tenant_id;
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 22_1 for CTE cte_1: WITH cte_2 AS (SELECT second_distributed_table.tenant_id AS cte2_id FROM recursive_dml_queries.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.>=) 2)) UPDATE recursive_dml_queries.distributed_table SET dept = 10 RETURNING tenant_id, dept, info
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: Plan 22 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = 5 FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept, intermediate_result.info FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer, info jsonb)) cte_1 WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.<) cte_1.tenant_id)
-- we don't support updating local table with a join with
-- distributed tables

@@ -324,7 +307,6 @@ FROM
distributed_table
WHERE
distributed_table.tenant_id = local_table.id;
DEBUG: relation local_table is not distributed
ERROR: relation local_table is not distributed
RESET client_min_messages;
DROP SCHEMA recursive_dml_queries CASCADE;
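
The expected-output changes above drop the old "common table expressions are not supported in distributed modifications" DEBUG lines: a modifying CTE is now planned recursively instead of being flagged. The shape of the query under test, reconstructed from the subplan text above:

WITH cte_1 AS (
    WITH cte_2 AS (
        SELECT tenant_id AS cte2_id FROM second_distributed_table WHERE dept >= 2
    )
    UPDATE distributed_table SET dept = 10 RETURNING *
)
UPDATE distributed_table
SET dept = 5
FROM cte_1
WHERE distributed_table.tenant_id < cte_1.tenant_id;
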

@@ -123,6 +123,7 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
DEBUG: generating subplan 18_2 for CTE t2: SELECT key, value_1, value_2 FROM fast_path_router_modify.modify_fast_path
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT key, value_1, value_2 FROM (SELECT intermediate_result.key, intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('18_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value_1 integer, value_2 text)) t2
DEBUG: Creating router plan
DEBUG: Plan is router executable

@@ -167,9 +168,11 @@ DEBUG: Plan is router executable

-- for update/share is not supported via fast-path wen replication factor > 1
SELECT * FROM modify_fast_path_replication_2 WHERE key = 1 FOR UPDATE;
DEBUG: SELECT FOR UPDATE with table replication factor > 1 not supported for non-reference tables.
ERROR: could not run distributed query with FOR UPDATE/SHARE commands
HINT: Consider using an equality filter on the distributed table's partition column.
SELECT * FROM modify_fast_path_replication_2 WHERE key = 1 FOR SHARE;
DEBUG: SELECT FOR UPDATE with table replication factor > 1 not supported for non-reference tables.
ERROR: could not run distributed query with FOR UPDATE/SHARE commands
HINT: Consider using an equality filter on the distributed table's partition column.
-- very simple queries on reference tables goes through fast-path planning
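
For contrast, the same row lock is accepted when the table is not multiply replicated; a sketch assuming modify_fast_path has replication factor 1:

SELECT * FROM modify_fast_path WHERE key = 1 FOR UPDATE;
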

@@ -7,7 +7,7 @@ SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
f
t
(1 row)

CREATE SCHEMA test_fkey_to_ref_in_tx;

@@ -124,7 +124,7 @@ BEGIN;

ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
-- case 1.6: SELECT to a reference table is followed by an unrelated DDL

@@ -274,7 +274,7 @@ DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
-- case 2.6: UPDATE to a reference table is followed by an unrelated DDL

@@ -297,7 +297,7 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
TRUNCATE on_update_fkey_table;
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
-- case 3.1: an unrelated DDL to a reference table is followed by a real-time SELECT
BEGIN;

@@ -342,20 +342,20 @@ ROLLBACK;
BEGIN;
ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
DEBUG: validating foreign key constraint "fkey"
CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1);
DEBUG: building index "fkey_test_index_1" on table "on_update_fkey_table"
DEBUG: building index "fkey_test_index_1" on table "on_update_fkey_table" serially
ROLLBACK;
-- case 4.6: DDL to reference table followed by a DDL to dist table, both touching fkey columns
BEGIN;
ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
DEBUG: validating foreign key constraint "fkey"
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
-- case 3.7: DDL to a reference table is followed by COPY
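
The two alternative expected-output files in this test differ only in PostgreSQL's index-build DEBUG lines: on PostgreSQL 11 and later (version_above_ten = t) index builds report running "serially". A sketch to reproduce one such line interactively:

SET client_min_messages TO debug1;
CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1);
-- PostgreSQL 11+: DEBUG:  building index "fkey_test_index_1" on table "on_update_fkey_table" serially
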

@@ -371,16 +371,16 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
TRUNCATE on_update_fkey_table;
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
-- case 3.9: DDL to a reference table is followed by TRUNCATE
BEGIN;
ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
DEBUG: validating foreign key constraint "fkey"
TRUNCATE on_update_fkey_table;
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
-----
--- Now, start testing the other way araound

@@ -435,7 +435,7 @@ BEGIN;

ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
DEBUG: validating foreign key constraint "fkey"
ERROR: cannot execute DDL on reference relation "referece_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"

@@ -475,8 +475,8 @@ DEBUG: truncate cascades to table "on_update_fkey_table_2380001"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380003"
DETAIL: NOTICE from localhost:57637
DEBUG: building index "referece_table_pkey" on table "referece_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
-- case 5.1: Parallel UPDATE on distributed table follow by a SELECT
BEGIN;

@@ -507,7 +507,7 @@ BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table"
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
DEBUG: validating foreign key constraint "fkey"
ERROR: cannot execute DDL on reference relation "referece_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"

@@ -523,7 +523,7 @@ ROLLBACK;
BEGIN;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
UPDATE referece_table SET id = 160 WHERE id = 15;
ROLLBACK;

@@ -590,7 +590,7 @@ ROLLBACK;
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -607,7 +607,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -635,7 +635,7 @@ ROLLBACK;
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -652,7 +652,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4"
DEBUG: building index "tt4_pkey" on table "tt4"
DEBUG: building index "tt4_pkey" on table "tt4" serially
SELECT create_distributed_table('tt4', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -673,7 +673,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -694,7 +694,7 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -711,7 +711,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4"
DEBUG: building index "tt4_pkey" on table "tt4"
DEBUG: building index "tt4_pkey" on table "tt4" serially
SELECT create_distributed_table('tt4', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -732,7 +732,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -762,7 +762,7 @@ BEGIN;

CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -779,7 +779,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -814,7 +814,7 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -831,7 +831,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -861,7 +861,7 @@ COMMIT;
BEGIN;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -882,7 +882,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -912,7 +912,7 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -933,7 +933,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -966,7 +966,7 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -993,11 +993,11 @@ BEGIN;

CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping

@@ -1037,11 +1037,11 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping

@@ -1079,10 +1079,10 @@ BEGIN;

CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -1119,7 +1119,7 @@ DETAIL: NOTICE from localhost:57638

-- and maybe some other test
CREATE INDEX i1 ON test_table_1(id);
DEBUG: building index "i1" on table "test_table_1"
DEBUG: building index "i1" on table "test_table_1" serially
ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0);
DEBUG: verifying table "test_table_2"
SELECT count(*) FROM test_table_2;

@@ -1143,7 +1143,7 @@ COMMIT;
-- set the mode to sequential for the next operations
CREATE TABLE reference_table(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_pkey" for table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
SELECT create_reference_table('reference_table');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -1160,7 +1160,7 @@ DETAIL: NOTICE from localhost:57638

CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table_pkey" for table "distributed_table"
DEBUG: building index "distributed_table_pkey" on table "distributed_table"
DEBUG: building index "distributed_table_pkey" on table "distributed_table" serially
SELECT create_distributed_table('distributed_table', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638

@@ -1199,7 +1199,6 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
WITH t1 AS (DELETE FROM reference_table RETURNING id)
DELETE FROM distributed_table USING t1 WHERE value_1 = t1.id RETURNING *;
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 92_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
DEBUG: Plan 92 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.distributed_table USING (SELECT intermediate_result.id FROM read_intermediate_result('92_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) RETURNING distributed_table.id, distributed_table.value_1, t1.id
DEBUG: switching to sequential query execution mode

@@ -1221,7 +1220,6 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
WITH t1 AS (DELETE FROM reference_table RETURNING id)
SELECT count(*) FROM distributed_table, t1 WHERE value_1 = t1.id;
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
DEBUG: generating subplan 96_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
DEBUG: Plan 96 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('96_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id)
DEBUG: switching to sequential query execution mode

@@ -1236,7 +1234,6 @@ DETAIL: Reference relation "reference_table" is modified, which might lead to d
WITH t1 AS (DELETE FROM distributed_table RETURNING id),
t2 AS (DELETE FROM reference_table RETURNING id)
SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
DEBUG: generating subplan 98_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
DEBUG: generating subplan 98_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
DEBUG: Plan 98 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('98_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('98_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id))

@@ -1246,7 +1243,6 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m
-- table via t1, and then access to the reference table in the main query
WITH t1 AS (DELETE FROM distributed_table RETURNING id)
DELETE FROM reference_table RETURNING id;
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 101_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
DEBUG: Plan 101 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
ERROR: cannot execute DML on reference relation "reference_table" because there was a parallel DML access to distributed relation "distributed_table" in the same transaction

@@ -1259,7 +1255,6 @@ BEGIN;
WITH t1 AS (DELETE FROM distributed_table RETURNING id),
t2 AS (DELETE FROM reference_table RETURNING id)
SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
DEBUG: generating subplan 103_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
DEBUG: generating subplan 103_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
DEBUG: Plan 103 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('103_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('103_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id))

@@ -1274,7 +1269,6 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
WITH t1 AS (DELETE FROM distributed_table RETURNING id)
DELETE FROM reference_table RETURNING id;
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 106_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
DEBUG: Plan 106 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
id
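
The tail of this file exercises the workaround from the hint above: running the modifying CTE inside a sequential-mode transaction, as in the expected output:

BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
WITH t1 AS (DELETE FROM distributed_table RETURNING id)
DELETE FROM reference_table RETURNING id;
COMMIT;
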

@@ -7,7 +7,7 @@ SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
f
(1 row)

CREATE SCHEMA test_fkey_to_ref_in_tx;

@@ -124,7 +124,7 @@ BEGIN;

ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
-- case 1.6: SELECT to a reference table is followed by an unrelated DDL

@@ -274,7 +274,7 @@ DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
-- case 2.6: UPDATE to a reference table is followed by an unrelated DDL
@ -297,7 +297,7 @@ BEGIN;
|
|||
DEBUG: switching to sequential query execution mode
|
||||
DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
|
||||
TRUNCATE on_update_fkey_table;
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
|
||||
ROLLBACK;
|
||||
-- case 3.1: an unrelated DDL to a reference table is followed by a real-time SELECT
|
||||
BEGIN;
|
||||
|
@ -342,20 +342,20 @@ ROLLBACK;
|
|||
BEGIN;
|
||||
ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
|
||||
DEBUG: rewriting table "referece_table"
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table"
|
||||
DEBUG: validating foreign key constraint "fkey"
|
||||
CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1);
|
||||
DEBUG: building index "fkey_test_index_1" on table "on_update_fkey_table" serially
|
||||
DEBUG: building index "fkey_test_index_1" on table "on_update_fkey_table"
|
||||
ROLLBACK;
|
||||
-- case 4.6: DDL to reference table followed by a DDL to dist table, both touching fkey columns
|
||||
BEGIN;
|
||||
ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
|
||||
DEBUG: rewriting table "referece_table"
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table"
|
||||
DEBUG: validating foreign key constraint "fkey"
|
||||
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
|
||||
DEBUG: rewriting table "on_update_fkey_table"
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
|
||||
DEBUG: validating foreign key constraint "fkey"
|
||||
ROLLBACK;
|
||||
-- case 3.7: DDL to a reference table is followed by COPY
|
||||
|
@ -371,16 +371,16 @@ BEGIN;
|
|||
DEBUG: switching to sequential query execution mode
|
||||
DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
|
||||
TRUNCATE on_update_fkey_table;
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
|
||||
ROLLBACK;
|
||||
-- case 3.9: DDL to a reference table is followed by TRUNCATE
|
||||
BEGIN;
|
||||
ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
|
||||
DEBUG: rewriting table "referece_table"
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table"
|
||||
DEBUG: validating foreign key constraint "fkey"
|
||||
TRUNCATE on_update_fkey_table;
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
|
||||
ROLLBACK;
|
||||
-----
|
||||
--- Now, start testing the other way around
|
||||
|
@ -435,7 +435,7 @@ BEGIN;
|
|||
|
||||
ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
|
||||
DEBUG: rewriting table "referece_table"
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table"
|
||||
DEBUG: validating foreign key constraint "fkey"
|
||||
ERROR: cannot execute DDL on reference relation "referece_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction
|
||||
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
|
||||
|
@ -475,8 +475,8 @@ DEBUG: truncate cascades to table "on_update_fkey_table_2380001"
|
|||
DETAIL: NOTICE from localhost:57637
|
||||
DEBUG: truncate cascades to table "on_update_fkey_table_2380003"
|
||||
DETAIL: NOTICE from localhost:57637
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table"
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
|
||||
ROLLBACK;
|
||||
-- case 5.1: Parallel UPDATE on distributed table followed by a SELECT
|
||||
BEGIN;
|
||||
|
@ -507,7 +507,7 @@ BEGIN;
|
|||
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
|
||||
ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
|
||||
DEBUG: rewriting table "referece_table"
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table" serially
|
||||
DEBUG: building index "referece_table_pkey" on table "referece_table"
|
||||
DEBUG: validating foreign key constraint "fkey"
|
||||
ERROR: cannot execute DDL on reference relation "referece_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
|
||||
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
|
||||
|
@ -523,7 +523,7 @@ ROLLBACK;
|
|||
BEGIN;
|
||||
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
|
||||
DEBUG: rewriting table "on_update_fkey_table"
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
|
||||
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
|
||||
DEBUG: validating foreign key constraint "fkey"
|
||||
UPDATE referece_table SET id = 160 WHERE id = 15;
|
||||
ROLLBACK;
|
||||
|
@ -590,7 +590,7 @@ ROLLBACK;
|
|||
BEGIN;
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -607,7 +607,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
SELECT create_distributed_table('test_table_2', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -635,7 +635,7 @@ ROLLBACK;
|
|||
BEGIN;
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -652,7 +652,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4"
|
||||
DEBUG: building index "tt4_pkey" on table "tt4" serially
|
||||
DEBUG: building index "tt4_pkey" on table "tt4"
|
||||
SELECT create_distributed_table('tt4', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -673,7 +673,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
SELECT create_distributed_table('test_table_2', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -694,7 +694,7 @@ BEGIN;
|
|||
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -711,7 +711,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4"
|
||||
DEBUG: building index "tt4_pkey" on table "tt4" serially
|
||||
DEBUG: building index "tt4_pkey" on table "tt4"
|
||||
SELECT create_distributed_table('tt4', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -732,7 +732,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
SELECT create_distributed_table('test_table_2', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -762,7 +762,7 @@ BEGIN;
|
|||
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -779,7 +779,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
SELECT create_distributed_table('test_table_2', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -814,7 +814,7 @@ BEGIN;
|
|||
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -831,7 +831,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
SELECT create_distributed_table('test_table_2', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -861,7 +861,7 @@ COMMIT;
|
|||
BEGIN;
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
SELECT create_distributed_table('test_table_2', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -882,7 +882,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -912,7 +912,7 @@ BEGIN;
|
|||
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
SELECT create_distributed_table('test_table_2', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -933,7 +933,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -966,7 +966,7 @@ BEGIN;
|
|||
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
SELECT create_distributed_table('test_table_2', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -993,11 +993,11 @@ BEGIN;
|
|||
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
|
@ -1037,11 +1037,11 @@ BEGIN;
|
|||
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
|
@ -1079,10 +1079,10 @@ BEGIN;
|
|||
|
||||
CREATE TABLE test_table_1(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
|
||||
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
|
||||
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
|
||||
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
|
||||
SELECT create_reference_table('test_table_1');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -1119,7 +1119,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
-- and maybe some other test
|
||||
CREATE INDEX i1 ON test_table_1(id);
|
||||
DEBUG: building index "i1" on table "test_table_1" serially
|
||||
DEBUG: building index "i1" on table "test_table_1"
|
||||
ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0);
|
||||
DEBUG: verifying table "test_table_2"
|
||||
SELECT count(*) FROM test_table_2;
|
||||
|
@ -1143,7 +1143,7 @@ COMMIT;
|
|||
-- set the mode to sequential for the next operations
|
||||
CREATE TABLE reference_table(id int PRIMARY KEY);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_pkey" for table "reference_table"
|
||||
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
|
||||
DEBUG: building index "reference_table_pkey" on table "reference_table"
|
||||
SELECT create_reference_table('reference_table');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -1160,7 +1160,7 @@ DETAIL: NOTICE from localhost:57638
|
|||
|
||||
CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int);
|
||||
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table_pkey" for table "distributed_table"
|
||||
DEBUG: building index "distributed_table_pkey" on table "distributed_table" serially
|
||||
DEBUG: building index "distributed_table_pkey" on table "distributed_table"
|
||||
SELECT create_distributed_table('distributed_table', 'id');
|
||||
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
|
||||
DETAIL: NOTICE from localhost:57638
|
||||
|
@ -1199,7 +1199,6 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
|
|||
-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
|
||||
WITH t1 AS (DELETE FROM reference_table RETURNING id)
|
||||
DELETE FROM distributed_table USING t1 WHERE value_1 = t1.id RETURNING *;
|
||||
DEBUG: common table expressions are not supported in distributed modifications
|
||||
DEBUG: generating subplan 92_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
|
||||
DEBUG: Plan 92 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.distributed_table USING (SELECT intermediate_result.id FROM read_intermediate_result('92_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) RETURNING distributed_table.id, distributed_table.value_1, t1.id
|
||||
DEBUG: switching to sequential query execution mode
|
||||
|
@ -1221,7 +1220,6 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
|
|||
-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
|
||||
WITH t1 AS (DELETE FROM reference_table RETURNING id)
|
||||
SELECT count(*) FROM distributed_table, t1 WHERE value_1 = t1.id;
|
||||
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
|
||||
DEBUG: generating subplan 96_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
|
||||
DEBUG: Plan 96 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('96_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id)
|
||||
DEBUG: switching to sequential query execution mode
|
||||
|
@ -1236,7 +1234,6 @@ DETAIL: Reference relation "reference_table" is modified, which might lead to d
|
|||
WITH t1 AS (DELETE FROM distributed_table RETURNING id),
|
||||
t2 AS (DELETE FROM reference_table RETURNING id)
|
||||
SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
|
||||
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
|
||||
DEBUG: generating subplan 98_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
|
||||
DEBUG: generating subplan 98_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
|
||||
DEBUG: Plan 98 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('98_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('98_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id))
|
||||
|
@ -1246,7 +1243,6 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m
|
|||
-- table via t1, and then access to the reference table in the main query
|
||||
WITH t1 AS (DELETE FROM distributed_table RETURNING id)
|
||||
DELETE FROM reference_table RETURNING id;
|
||||
DEBUG: common table expressions are not supported in distributed modifications
|
||||
DEBUG: generating subplan 101_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
|
||||
DEBUG: Plan 101 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
|
||||
ERROR: cannot execute DML on reference relation "reference_table" because there was a parallel DML access to distributed relation "distributed_table" in the same transaction
|
||||
|
@ -1259,7 +1255,6 @@ BEGIN;
|
|||
WITH t1 AS (DELETE FROM distributed_table RETURNING id),
|
||||
t2 AS (DELETE FROM reference_table RETURNING id)
|
||||
SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
|
||||
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
|
||||
DEBUG: generating subplan 103_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
|
||||
DEBUG: generating subplan 103_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
|
||||
DEBUG: Plan 103 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('103_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('103_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id))
|
||||
|
@ -1274,7 +1269,6 @@ BEGIN;
|
|||
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
|
||||
WITH t1 AS (DELETE FROM distributed_table RETURNING id)
|
||||
DELETE FROM reference_table RETURNING id;
|
||||
DEBUG: common table expressions are not supported in distributed modifications
|
||||
DEBUG: generating subplan 106_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
|
||||
DEBUG: Plan 106 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
|
||||
id
|
||||
|
|
|
@ -45,16 +45,16 @@ step s3-view-worker:
|
|||
|
||||
query query_hostname query_hostport master_query_host_name master_query_host_port state wait_event_type wait_event usename datname
|
||||
|
||||
SELECT worker_apply_shard_ddl_command (105949, 'public', '
|
||||
SELECT worker_apply_shard_ddl_command (105958, 'public', '
|
||||
ALTER TABLE test_table ADD COLUMN x INT;
|
||||
') localhost 57638 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
SELECT worker_apply_shard_ddl_command (105948, 'public', '
|
||||
SELECT worker_apply_shard_ddl_command (105957, 'public', '
|
||||
ALTER TABLE test_table ADD COLUMN x INT;
|
||||
') localhost 57637 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
SELECT worker_apply_shard_ddl_command (105947, 'public', '
|
||||
SELECT worker_apply_shard_ddl_command (105956, 'public', '
|
||||
ALTER TABLE test_table ADD COLUMN x INT;
|
||||
') localhost 57638 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
SELECT worker_apply_shard_ddl_command (105946, 'public', '
|
||||
SELECT worker_apply_shard_ddl_command (105955, 'public', '
|
||||
ALTER TABLE test_table ADD COLUMN x INT;
|
||||
') localhost 57637 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
SELECT gid FROM pg_prepared_xacts WHERE gid LIKE 'citus\_0\_%' localhost 57638 0 idle Client ClientRead postgres regression
|
||||
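The s3-view-worker output above also documents how DDL reaches the shards: the coordinator issues one worker_apply_shard_ddl_command() call per shard placement (shard ids 105955-105958 on the new side of this diff), and the worker-side helper applies the DDL to the shard-suffixed table. One such call amounts to:

SELECT worker_apply_shard_ddl_command (105958, 'public', '
    ALTER TABLE test_table ADD COLUMN x INT;
');
-- which, on that worker, is roughly equivalent to:
--   ALTER TABLE public.test_table_105958 ADD COLUMN x INT;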
|
@ -116,7 +116,7 @@ query query_hostname query_hostport master_query_host_name master_query_
|
|||
|
||||
SELECT gid FROM pg_prepared_xacts WHERE gid LIKE 'citus\_0\_%' localhost 57638 0 idle Client ClientRead postgres regression
|
||||
SELECT gid FROM pg_prepared_xacts WHERE gid LIKE 'citus\_0\_%' localhost 57637 0 idle Client ClientRead postgres regression
|
||||
INSERT INTO public.test_table_105952 (column1, column2) VALUES (100, 100) localhost 57637 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
INSERT INTO public.test_table_105961 (column1, column2) VALUES (100, 100) localhost 57637 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
step s2-rollback:
|
||||
ROLLBACK;
|
||||
|
||||
|
@ -177,10 +177,10 @@ query query_hostname query_hostport master_query_host_name master_query_
|
|||
|
||||
SELECT gid FROM pg_prepared_xacts WHERE gid LIKE 'citus\_0\_%' localhost 57638 0 idle Client ClientRead postgres regression
|
||||
SELECT gid FROM pg_prepared_xacts WHERE gid LIKE 'citus\_0\_%' localhost 57637 0 idle Client ClientRead postgres regression
|
||||
COPY (SELECT count(*) AS count FROM test_table_105957 test_table WHERE true) TO STDOUT localhost 57638 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
COPY (SELECT count(*) AS count FROM test_table_105956 test_table WHERE true) TO STDOUT localhost 57637 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
COPY (SELECT count(*) AS count FROM test_table_105955 test_table WHERE true) TO STDOUT localhost 57638 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
COPY (SELECT count(*) AS count FROM test_table_105954 test_table WHERE true) TO STDOUT localhost 57637 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
COPY (SELECT count(*) AS count FROM test_table_105966 test_table WHERE true) TO STDOUT localhost 57638 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
COPY (SELECT count(*) AS count FROM test_table_105965 test_table WHERE true) TO STDOUT localhost 57637 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
COPY (SELECT count(*) AS count FROM test_table_105964 test_table WHERE true) TO STDOUT localhost 57638 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
COPY (SELECT count(*) AS count FROM test_table_105963 test_table WHERE true) TO STDOUT localhost 57637 coordinator_host 57636 idle in transaction Client ClientRead postgres regression
|
||||
step s2-rollback:
|
||||
ROLLBACK;
|
||||
|
||||
|
@ -241,7 +241,7 @@ query query_hostname query_hostport master_query_host_name master_query_
|
|||
|
||||
SELECT gid FROM pg_prepared_xacts WHERE gid LIKE 'citus\_0\_%' localhost 57638 0 idle Client ClientRead postgres regression
|
||||
SELECT gid FROM pg_prepared_xacts WHERE gid LIKE 'citus\_0\_%' localhost 57637 0 idle Client ClientRead postgres regression
|
||||
SELECT count(*) AS count FROM public.test_table_105959 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55) localhost 57638 0 idle Client ClientRead postgres regression
|
||||
SELECT count(*) AS count FROM public.test_table_105968 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55) localhost 57638 0 idle Client ClientRead postgres regression
|
||||
step s2-rollback:
|
||||
ROLLBACK;
|
||||
|
||||
|
|
|
@ -374,3 +374,28 @@ step s2-finish:
|
|||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-update-rt-with-cte-select-from-rt s2-begin s2-update-rt s1-finish s2-finish
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-update-rt-with-cte-select-from-rt:
|
||||
WITH foo AS (SELECT * FROM ref_table FOR UPDATE)
|
||||
UPDATE ref_table SET val_1 = 4 FROM foo WHERE ref_table.id = foo.id;
|
||||
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-update-rt:
|
||||
UPDATE ref_table SET val_1 = 5 WHERE id = 1;
|
||||
<waiting ...>
|
||||
step s1-finish:
|
||||
COMMIT;
|
||||
|
||||
step s2-update-rt: <... completed>
|
||||
step s2-finish:
|
||||
COMMIT;
|
||||
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
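This added permutation pins down the locking behavior of a SELECT ... FOR UPDATE inside a CTE on a reference table: the row locks taken by the CTE make s2's plain UPDATE wait until s1 commits. Reproduced outside the isolation tester, the interleaving looks like:

-- session 1
BEGIN;
WITH foo AS (SELECT * FROM ref_table FOR UPDATE)
UPDATE ref_table SET val_1 = 4 FROM foo WHERE ref_table.id = foo.id;

-- session 2: blocks on session 1's row locks
UPDATE ref_table SET val_1 = 5 WHERE id = 1;

-- session 1
COMMIT;  -- session 2's UPDATE now proceeds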
|
|
|
@ -33,6 +33,7 @@ SET client_min_messages TO DEBUG2;
|
|||
-- Check that we can prune shards for simple cases, boolean expressions and
|
||||
-- immutable functions.
|
||||
SELECT count(*) FROM orders_hash_partitioned;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
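The contrast this file verifies throughout: without a filter on the distribution column the router planner bails out ("cannot handle multi-shard select queries") and every shard is scanned, while an equality filter on the distribution column hashes the value, prunes to a single shard, and yields a router plan. In short:

-- multi-shard: no distribution column filter
SELECT count(*) FROM orders_hash_partitioned;

-- router plan: equality on the distribution column prunes to one shard
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;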
|
@ -96,30 +97,35 @@ DETAIL: distribution column value: 1
|
|||
-- disable router planning
|
||||
SET citus.enable_router_execution TO 'false';
|
||||
SELECT count(*) FROM orders_hash_partitioned;
|
||||
DEBUG: Router planner not enabled.
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
|
||||
DEBUG: Router planner not enabled.
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2;
|
||||
DEBUG: Router planner not enabled.
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3;
|
||||
DEBUG: Router planner not enabled.
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4;
|
||||
DEBUG: Router planner not enabled.
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -127,12 +133,14 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4;
|
|||
|
||||
SELECT count(*) FROM orders_hash_partitioned
|
||||
WHERE o_orderkey = 1 AND o_clerk = 'aaa';
|
||||
DEBUG: Router planner not enabled.
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);
|
||||
DEBUG: Router planner not enabled.
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -140,18 +148,21 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);
|
|||
|
||||
SET citus.enable_router_execution TO DEFAULT;
|
||||
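citus.enable_router_execution is the switch exercised above: with it off, even single-shard queries report "Router planner not enabled." and take the multi-shard path, which is what these tests rely on to drive the multi-shard code path with otherwise router-plannable queries. Scoped usage:

SET citus.enable_router_execution TO 'false';
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;  -- forced multi-shard
SET citus.enable_router_execution TO DEFAULT;                       -- router planning again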
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is not NULL;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -159,6 +170,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2;
|
|||
|
||||
SELECT count(*) FROM orders_hash_partitioned
|
||||
WHERE o_orderkey = 1 OR o_orderkey = 2;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -166,6 +178,7 @@ SELECT count(*) FROM orders_hash_partitioned
|
|||
|
||||
SELECT count(*) FROM orders_hash_partitioned
|
||||
WHERE o_orderkey = 1 OR o_clerk = 'aaa';
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -173,6 +186,7 @@ SELECT count(*) FROM orders_hash_partitioned
|
|||
|
||||
SELECT count(*) FROM orders_hash_partitioned
|
||||
WHERE o_orderkey = 1 OR (o_orderkey = 3 AND o_clerk = 'aaa');
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -180,6 +194,7 @@ SELECT count(*) FROM orders_hash_partitioned
|
|||
|
||||
SELECT count(*) FROM orders_hash_partitioned
|
||||
WHERE o_orderkey = 1 OR o_orderkey is NULL;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -301,6 +316,7 @@ SET client_min_messages TO DEBUG2;
|
|||
-- equality operator
|
||||
SELECT count(*) FROM orders_hash_partitioned
|
||||
WHERE o_orderkey < ALL ('{1,2,3}');
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -310,6 +326,7 @@ SELECT count(*) FROM orders_hash_partitioned
|
|||
-- columns are used with ANY/IN/ALL
|
||||
SELECT count(*) FROM orders_hash_partitioned
|
||||
WHERE o_orderkey = 1 OR o_totalprice IN (2, 5);
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -317,6 +334,7 @@ SELECT count(*) FROM orders_hash_partitioned
|
|||
|
||||
-- Check that we cannot prune for mutable functions.
|
||||
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random();
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
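Pruning depends on plan-time evaluation. An immutable expression such as abs(-1) folds to a constant that can be hashed, so the shard is chosen during planning; a volatile function such as random() yields no value until execution time, so no shard can be ruled out:

-- immutable: folded to 1 at planning time, prunes to a single shard
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);

-- volatile: unknown at planning time, all shards are scanned
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random();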
|
@ -324,6 +342,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random();
|
|||
|
||||
SELECT count(*) FROM orders_hash_partitioned
|
||||
WHERE o_orderkey = random() OR o_orderkey = 1;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
count
|
||||
-------
|
||||
0
|
||||
|
@ -343,6 +362,7 @@ DETAIL: distribution column value: 1
|
|||
SELECT count(*)
|
||||
FROM orders_hash_partitioned orders1, orders_hash_partitioned orders2
|
||||
WHERE orders1.o_orderkey = orders2.o_orderkey;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [1073741824,2147483647]
|
||||
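The join-pruning DEBUG lines enumerate shard pairs whose hash ranges can never intersect: with four hash shards, each shard owns a quarter of the int4 hash space, and a co-located equi-join only has to pair shards with identical ranges, pruning the other 12 of the 16 combinations. The ranges in the output can be reproduced with a small illustrative query (not part of the test):

SELECT shard,
       -2147483648 + shard * 1073741824::bigint           AS range_start,
       -2147483648 + (shard + 1) * 1073741824::bigint - 1 AS range_end
FROM generate_series(0, 3) AS shard;
-- [-2147483648,-1073741825], [-1073741824,-1], [0,1073741823], [1073741824,2147483647]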
|
|
|
@ -633,7 +633,9 @@ INSERT INTO agg_events (value_1_agg, user_id)
|
|||
raw_events_first;
|
||||
DEBUG: DISTINCT ON (non-partition column) clauses are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
SELECT user_id, value_1_agg FROM agg_events ORDER BY 1,2;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
user_id | value_1_agg
|
||||
---------+-------------
|
||||
1 | 10
|
||||
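The new DEBUG lines trace the fallback added for INSERT ... SELECT statements that cannot be distributed: the SELECT runs as a regular (here multi-shard) query, its rows are collected on the coordinator, and the coordinator performs the insert. What decides between the two paths is only the pushdown-ability of the SELECT; a sketch based on the queries in this file:

-- pushed down: DISTINCT ON the distribution column, runs shard by shard
INSERT INTO agg_events (user_id, value_1_agg)
SELECT DISTINCT ON (user_id) user_id, value_1 FROM raw_events_first;

-- coordinator fallback: DISTINCT ON a non-partition column
INSERT INTO agg_events (value_1_agg, user_id)
SELECT DISTINCT ON (value_1) value_1, user_id FROM raw_events_first;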
|
@ -664,6 +666,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_t
|
|||
DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT ON (user_id) user_id, value_1 FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 2147483647))
|
||||
DEBUG: Plan is router executable
|
||||
SELECT user_id, value_1_agg FROM agg_events ORDER BY 1,2;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
user_id | value_1_agg
|
||||
---------+-------------
|
||||
1 | 10
|
||||
|
@ -689,7 +692,9 @@ INSERT INTO agg_events
|
|||
fist_table_agg;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan 51_1 for CTE fist_table_agg: SELECT (max(value_1) OPERATOR(pg_catalog.+) 1) AS v1_agg, user_id FROM public.raw_events_first GROUP BY user_id
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: Plan 51 query after replacing subqueries and CTEs: SELECT user_id, v1_agg FROM (SELECT fist_table_agg.v1_agg, fist_table_agg.user_id FROM (SELECT intermediate_result.v1_agg, intermediate_result.user_id FROM read_intermediate_result('51_1'::text, 'binary'::citus_copy_format) intermediate_result(v1_agg integer, user_id integer)) fist_table_agg) citus_insert_select_subquery
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
|
@ -703,8 +708,10 @@ INSERT INTO agg_events
|
|||
raw_events_first;
|
||||
DEBUG: Subqueries without relations are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan 54_1 for CTE sub_cte: SELECT 1
|
||||
DEBUG: Plan 54 query after replacing subqueries and CTEs: SELECT user_id, (SELECT sub_cte."?column?" FROM (SELECT intermediate_result."?column?" FROM read_intermediate_result('54_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer)) sub_cte) FROM public.raw_events_first
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
ERROR: could not run distributed query with subquery outside the FROM and WHERE clauses
|
||||
HINT: Consider using an equality filter on the distributed table's partition column.
|
||||
-- We support set operations via the coordinator
|
||||
|
@ -718,6 +725,7 @@ FROM
|
|||
(SELECT user_id FROM raw_events_second)) as foo;
|
||||
DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
ROLLBACK;
|
||||
-- We do support set operations through recursive planning
|
||||
BEGIN;
|
||||
|
@ -728,7 +736,10 @@ INSERT INTO
|
|||
(SELECT user_id FROM raw_events_first);
|
||||
DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan 58_1 for subquery SELECT user_id FROM public.raw_events_first
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan 58_2 for subquery SELECT user_id FROM public.raw_events_first
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
|
@ -1073,6 +1084,8 @@ FROM (SELECT SUM(raw_events_second.value_4) AS v4,
|
|||
GROUP BY raw_events_second.value_3) AS foo;
|
||||
DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [1073741824,2147483647]
|
||||
|
@ -1099,6 +1112,7 @@ FROM raw_events_first;
|
|||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: the query doesn't include the target table's partition column
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
ERROR: the partition column of table public.raw_events_second should have a value
|
||||
INSERT INTO raw_events_second
|
||||
(value_1)
|
||||
|
@ -1107,6 +1121,7 @@ FROM raw_events_first;
|
|||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: the query doesn't include the target table's partition column
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
ERROR: the partition column of table public.raw_events_second should have a value
|
||||
INSERT INTO raw_events_second
|
||||
(user_id)
|
||||
|
@ -1115,6 +1130,7 @@ FROM raw_events_first;
|
|||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
ERROR: the partition column of table public.raw_events_second cannot be NULL
|
||||
INSERT INTO raw_events_second
|
||||
(user_id)
|
||||
|
@ -1124,6 +1140,7 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition
|
|||
DETAIL: Subquery contains an operator in the same position as the target table's partition column.
|
||||
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
INSERT INTO raw_events_second
|
||||
(user_id)
|
||||
SELECT user_id :: bigint
|
||||
|
@ -1132,6 +1149,7 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition
|
|||
DETAIL: Subquery contains an explicit cast in the same position as the target table's partition column.
|
||||
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
INSERT INTO agg_events
|
||||
(value_3_agg,
|
||||
value_4_agg,
|
||||
|
@ -1149,6 +1167,7 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition
|
|||
DETAIL: Subquery contains an aggregation in the same position as the target table's partition column.
|
||||
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
ERROR: the partition column of table public.agg_events cannot be NULL
|
||||
INSERT INTO agg_events
|
||||
(value_3_agg,
|
||||
|
@ -1167,6 +1186,7 @@ GROUP BY user_id,
|
|||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
ERROR: the partition column of table public.agg_events cannot be NULL
|
||||
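Taken together, the DETAIL and HINT messages above state the pushdown rule for distributed INSERT ... SELECT: the value feeding the target's partition column must be a bare column reference to the source's partition column with exactly matching type; a cast, operator, aggregate, or any other expression in that position forces the coordinator fallback (and an error if the resulting partition column value would be NULL). A minimal contrasting pair, assuming the co-located tables used throughout this file:

-- pushdown-compatible: bare reference to the source partition column
INSERT INTO raw_events_second (user_id)
SELECT user_id FROM raw_events_first;

-- not pushdown-compatible: explicit cast in the partition column position
INSERT INTO raw_events_second (user_id)
SELECT user_id :: bigint FROM raw_events_first;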
-- tables should be co-located
|
||||
INSERT INTO agg_events (user_id)
|
||||
|
@ -1205,6 +1225,8 @@ FROM (SELECT SUM(raw_events_second.value_4) AS v4,
|
|||
ON (f.id = f2.id);
|
||||
DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [1073741824,2147483647]
|
||||
|
@ -1219,6 +1241,7 @@ DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1]
|
|||
DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823]
|
||||
DEBUG: generating subplan 107_1 for subquery SELECT sum(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, sum(raw_events_second.user_id) AS id FROM public.raw_events_first, public.raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.value_1 HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)
|
||||
DEBUG: Plan 107 query after replacing subqueries and CTEs: SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first, public.reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT intermediate_result.v4, intermediate_result.v1, intermediate_result.id FROM read_intermediate_result('107_1'::text, 'binary'::citus_copy_format) intermediate_result(v4 numeric, v1 integer, id bigint)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
-- the second part of the query is not routable since
|
||||
-- the GROUP BY is not on the partition column (i.e., value_1), and thus the join
|
||||
-- on f.id = f2.id is not on the partition key (instead, it is on the sum of the partition key)
|
||||
|
@ -1247,6 +1270,8 @@ FROM (SELECT SUM(raw_events_second.value_4) AS v4,
|
|||
ON (f.id = f2.id);
|
||||
DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
|
||||
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [1073741824,2147483647]
|
||||
|
@ -1261,6 +1286,7 @@ DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1]
|
|||
DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823]
|
||||
DEBUG: generating subplan 110_1 for subquery SELECT sum(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, sum(raw_events_second.user_id) AS id FROM public.raw_events_first, public.raw_events_second WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id) GROUP BY raw_events_second.value_1 HAVING (sum(raw_events_second.value_4) OPERATOR(pg_catalog.>) (10)::numeric)
|
||||
DEBUG: Plan 110 query after replacing subqueries and CTEs: SELECT f.id FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM public.raw_events_first, public.reference_table WHERE (raw_events_first.user_id OPERATOR(pg_catalog.=) reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT intermediate_result.v4, intermediate_result.v1, intermediate_result.id FROM read_intermediate_result('110_1'::text, 'binary'::citus_copy_format) intermediate_result(v4 numeric, v1 integer, id bigint)) foo2) f2 ON ((f.id OPERATOR(pg_catalog.=) f2.id)))
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
-- cannot push down the query since the JOIN is not an equi-JOIN
|
||||
INSERT INTO agg_events
|
||||
(user_id, value_4_agg)
|
||||
|
@ -1670,6 +1696,7 @@ FROM raw_events_second
|
|||
GROUP BY grouping sets ( ( user_id ), ( value_1 ), ( user_id, value_1 ), ( ) );
|
||||
DEBUG: grouping sets are not allowed in distributed INSERT ... SELECT queries
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP
|
||||
HINT: Consider using an equality filter on the distributed table's partition column.
|
||||
-- set back to INFO
|
||||
|
@ -2085,50 +2112,60 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition
|
|||
DETAIL: Subquery contains a case expression in the same position as the target table's partition column.
|
||||
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
INSERT INTO text_table (part_col) SELECT COALESCE(part_col, 'onder') FROM text_table;
|
||||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: Subquery contains a coalesce expression in the same position as the target table's partition column.
|
||||
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
INSERT INTO text_table (part_col) SELECT GREATEST(part_col, 'jason') FROM text_table;
|
||||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: Subquery contains a min/max expression in the same position as the target table's partition column.
|
||||
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: Router planner cannot handle multi-shard select queries
INSERT INTO text_table (part_col) SELECT LEAST(part_col, 'andres') FROM text_table;
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: Subquery contains a min/max expression in the same position as the target table's partition column.
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: Router planner cannot handle multi-shard select queries
INSERT INTO text_table (part_col) SELECT NULLIF(part_col, 'metin') FROM text_table;
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: Subquery contains an expression that is not a simple column reference in the same position as the target table's partition column.
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: Router planner cannot handle multi-shard select queries
INSERT INTO text_table (part_col) SELECT part_col isnull FROM text_table;
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: Subquery contains an expression that is not a simple column reference in the same position as the target table's partition column.
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: Router planner cannot handle multi-shard select queries
INSERT INTO text_table (part_col) SELECT part_col::text from char_table;
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: Subquery contains an explicit coercion in the same position as the target table's partition column.
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: Router planner cannot handle multi-shard select queries
INSERT INTO text_table (part_col) SELECT (part_col = 'burak') is true FROM text_table;
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: Subquery contains an expression that is not a simple column reference in the same position as the target table's partition column.
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: Router planner cannot handle multi-shard select queries
INSERT INTO text_table (part_col) SELECT val FROM text_table;
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The data type of the target table's partition column should exactly match the data type of the corresponding simple column reference in the subquery.
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: Router planner cannot handle multi-shard select queries
INSERT INTO text_table (part_col) SELECT val::text FROM text_table;
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: Subquery contains an explicit coercion in the same position as the target table's partition column.
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: Router planner cannot handle multi-shard select queries
RESET client_min_messages;
insert into table_with_starts_with_defaults (b,c) select b,c FROM table_with_starts_with_defaults;
-- Test on partition column without native hash function

@ -2187,6 +2224,7 @@ FROM
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
DEBUG: Router planner cannot handle multi-shard select queries
user_id | value_1
---------+---------
1 | 1
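The failing cases above share one cause: the target's partition column is fed by an expression (LEAST, NULLIF, IS NULL, a cast) or by a column of a different type, so the statement falls back to collecting results on the coordinator. For contrast, a minimal sketch of the shape the distributed path accepts, reusing the text_table fixture above; the expected behavior in the comment is an inference from the DEBUG messages in these hunks, not output taken from this diff:

-- illustrative only: the partition column is copied as a plain column
-- reference of the same type, so no coercion or expression blocks pushdown
INSERT INTO text_table (part_col) SELECT part_col FROM text_table;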
@ -66,7 +66,6 @@ WITH inserted_table AS (
source_table_1
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *
) SELECT * FROM inserted_table ORDER BY 1;
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
DEBUG: generating subplan 8_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_2, col_3 FROM on_conflict.source_table_1 ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.

@ -113,7 +112,6 @@ WITH inserted_table AS (
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *
) SELECT * FROM inserted_table ORDER BY 1;
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
DEBUG: generating subplan 14_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
DEBUG: LIMIT clauses are not allowed in distributed INSERT ... SELECT queries
DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1

@ -150,7 +148,6 @@ WITH inserted_table AS (
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING *
) SELECT * FROM inserted_table ORDER BY 1;
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
DEBUG: generating subplan 18_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM ((SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) UNION (SELECT source_table_2.col_1, source_table_2.col_2, source_table_2.col_3 FROM on_conflict.source_table_2 LIMIT 5)) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING target_table.col_1, target_table.col_2
DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries
DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1

@ -244,7 +241,6 @@ WITH inserted_table AS (
)
INSERT INTO target_table SELECT * FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1 RETURNING *
) SELECT * FROM inserted_table ORDER BY 1;
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
DEBUG: generating subplan 37_1 for CTE inserted_table: WITH cte AS (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1), cte_2 AS (SELECT cte.col_1, cte.col_2 FROM cte) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('37_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1

@ -268,7 +264,6 @@ WITH cte AS (
INSERT INTO target_table (SELECT * FROM basic) ON CONFLICT DO NOTHING RETURNING *
)
UPDATE target_table SET col_2 = 4 WHERE col_1 IN (SELECT col_1 FROM cte);
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 42_1 for CTE cte: WITH basic AS (SELECT source_table_1.col_1, source_table_1.col_2 FROM on_conflict.source_table_1) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM basic ON CONFLICT DO NOTHING RETURNING target_table.col_1, target_table.col_2
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Plan 42 query after replacing subqueries and CTEs: UPDATE on_conflict.target_table SET col_2 = 4 WHERE (col_1 OPERATOR(pg_catalog.=) ANY (SELECT cte.col_1 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte))
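Each hunk above shows the same two-step plan: the data-modifying CTE is cut out as its own subplan, and the outer query reads its rows back through read_intermediate_result. A minimal sketch of that shape, reusing the on_conflict fixtures above (illustrative only; the comment describes the DEBUG sequence inferred from these hunks):

WITH inserted_table AS (
    INSERT INTO target_table
    SELECT col_1, col_2 FROM source_table_1
    ON CONFLICT (col_1) DO NOTHING RETURNING *
)
SELECT count(*) FROM inserted_table;
-- expected: "generating subplan ... for CTE inserted_table", then an outer
-- plan over the intermediate result, as in the plans shown above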
@ -72,6 +72,7 @@ SET client_min_messages TO DEBUG2;
-- The following query checks that we can correctly handle self-joins
EXPLAIN SELECT l1.l_quantity FROM lineitem l1, lineitem l2
WHERE l1.l_orderkey = l2.l_orderkey AND l1.l_quantity > 5;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ][ local partition join "lineitem" ]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
@ -8,6 +8,7 @@ SET citus.explain_distributed_queries TO off;
SET client_min_messages TO DEBUG2;
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
sum | avg

@ -17,6 +18,7 @@ DEBUG: join prunable for intervals [8997,14947] and [1,5986]

SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey AND l_orderkey > 9030;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
sum | avg
-------+--------------------

@ -27,6 +29,7 @@ DEBUG: join prunable for intervals [8997,14947] and [1,5986]
-- works as expected in this case.
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey AND l_orderkey > 20000;
DEBUG: Router planner does not support append-partitioned tables.
sum | avg
-----+-----

@ -37,6 +40,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
-- out all the shards, and leave us with an empty task list.
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey AND l_orderkey > 6000 AND o_orderkey < 6000;
DEBUG: Router planner does not support append-partitioned tables.
sum | avg
-----+-----

@ -45,6 +49,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
-- Make sure that we can handle filters without a column
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey AND false;
DEBUG: Router planner does not support append-partitioned tables.
sum | avg
-----+-----

@ -53,6 +58,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
SELECT sum(l_linenumber), avg(l_linenumber)
FROM lineitem INNER JOIN orders ON (l_orderkey = o_orderkey)
WHERE false;
DEBUG: Router planner does not support append-partitioned tables.
sum | avg
-----+-----

@ -65,6 +71,7 @@ SELECT sum(l_linenumber), avg(l_linenumber)
EXPLAIN SELECT count(*)
FROM array_partitioned_table table1, array_partitioned_table table2
WHERE table1.array_column = table2.array_column;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for intervals [{},{AZZXSP27F21T6,AZZXSP27F21T6}] and [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}]
DEBUG: join prunable for intervals [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] and [{},{AZZXSP27F21T6,AZZXSP27F21T6}]
QUERY PLAN

@ -77,6 +84,7 @@ DEBUG: join prunable for intervals [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AM
EXPLAIN SELECT count(*)
FROM composite_partitioned_table table1, composite_partitioned_table table2
WHERE table1.composite_column = table2.composite_column;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for intervals [(a,3,b),(b,4,c)] and [(c,5,d),(d,6,e)]
DEBUG: join prunable for intervals [(c,5,d),(d,6,e)] and [(a,3,b),(b,4,c)]
QUERY PLAN

@ -90,6 +98,7 @@ DEBUG: join prunable for intervals [(c,5,d),(d,6,e)] and [(a,3,b),(b,4,c)]
EXPLAIN SELECT count(*)
FROM varchar_partitioned_table table1, varchar_partitioned_table table2
WHERE table1.varchar_column = table2.varchar_column;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for intervals [AA1000U2AMO4ZGX,AZZXSP27F21T6] and [BA1000U2AMO4ZGX,BZZXSP27F21T6]
DEBUG: join prunable for intervals [BA1000U2AMO4ZGX,BZZXSP27F21T6] and [AA1000U2AMO4ZGX,AZZXSP27F21T6]
QUERY PLAN

@ -112,6 +112,7 @@ DEBUG: Plan is router executable
(1 row)

SELECT * FROM append_partitioned WHERE id = 414123;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: Plan is router executable
id | symbol | bidder_id | placed_at | kind | limit_price
--------+--------+-----------+--------------------------+------+-------------
@ -196,6 +196,7 @@ DETAIL: distribution column value: 1
-- query is a single shard query but can't do shard pruning,
-- not router-plannable due to <= and IN
SELECT * FROM articles_hash_mx WHERE author_id <= 1;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -281,6 +282,7 @@ DEBUG: Plan is router executable
WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1),
id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 2)
SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
DEBUG: cannot run command which targets multiple shards
DEBUG: generating subplan 66_1 for CTE id_author: SELECT id, author_id FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 1)
DEBUG: Creating router plan
DEBUG: Plan is router executable

@ -363,6 +365,7 @@ WITH RECURSIVE hierarchy as (
ON (h.employee_id = ce.manager_id AND
h.company_id = ce.company_id))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: recursive CTEs are not supported in distributed queries
-- logically wrong query, query involves different shards
-- from the same table, but still router plannable due to

@ -378,6 +381,7 @@ WITH RECURSIVE hierarchy as (
h.company_id = ce.company_id AND
ce.company_id = 2))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
DEBUG: cannot run command which targets multiple shards
ERROR: recursive CTEs are not supported in distributed queries
-- grouping sets are supported on single shard
SELECT

@ -417,6 +421,7 @@ SELECT
WHERE author_id = 1 or author_id = 2
GROUP BY GROUPING SETS ((id),(subtitle))
ORDER BY id, subtitle;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP
HINT: Consider using an equality filter on the distributed table's partition column.
-- queries which involve functions in FROM clause are supported if it goes to a single worker.

@ -452,6 +457,7 @@ DEBUG: Plan is router executable

-- they are supported via (sub)query pushdown if multiple workers are involved
SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2 ORDER BY 4 DESC, 1 DESC, 2 DESC LIMIT 5;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 5
id | author_id | title | word_count | position
----+-----------+------------+------------+----------

@ -466,8 +472,11 @@ DEBUG: push down of limit count: 5
SELECT articles_hash_mx.id,test.word_count
FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test WHERE test.id = articles_hash_mx.id
ORDER BY test.word_count DESC, articles_hash_mx.id LIMIT 5;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 85_1 for subquery SELECT id, word_count FROM public.articles_hash_mx
DEBUG: Plan 85 query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('85_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) ORDER BY test.word_count DESC, articles_hash_mx.id LIMIT 5
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 5
id | word_count
----+------------

@ -482,6 +491,8 @@ SELECT articles_hash_mx.id,test.word_count
FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test
WHERE test.id = articles_hash_mx.id and articles_hash_mx.author_id = 1
ORDER BY articles_hash_mx.id;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 87_1 for subquery SELECT id, word_count FROM public.articles_hash_mx
DEBUG: Plan 87 query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('87_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) AND (articles_hash_mx.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash_mx.id
DEBUG: Creating router plan

@ -499,6 +510,7 @@ DETAIL: distribution column value: 1
-- subqueries are not supported in SELECT clause
SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash_mx a2 WHERE a.id = a2.id LIMIT 1)
AS special_price FROM articles_hash_mx a;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: could not run distributed query with subquery outside the FROM and WHERE clauses
HINT: Consider using an equality filter on the distributed table's partition column.

@ -538,6 +550,7 @@ DEBUG: Plan is router executable
SELECT *
FROM articles_hash_mx
WHERE author_id = 1 OR author_id = 18;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -604,6 +617,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
WHERE a.author_id = 2 and a.author_id = b.author_id
LIMIT 3;
DEBUG: Found no worker with all shard placements
DEBUG: found no worker with all shard placements
DEBUG: generating subplan 96_1 for CTE single_shard: SELECT id, author_id, title, word_count FROM public.articles_single_shard_hash_mx
DEBUG: Creating router plan
DEBUG: Plan is router executable

@ -889,6 +903,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash_mx
WHERE author_id = 1 or id = 1;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -927,6 +942,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash_mx
WHERE author_id = (random()::int * 0 + 1);
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -940,6 +956,7 @@ SELECT *
SELECT *
FROM articles_hash_mx
WHERE author_id = 1 or id = 1;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -970,6 +987,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash_mx
WHERE 1 = abs(author_id);
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -983,6 +1001,7 @@ SELECT *
SELECT *
FROM articles_hash_mx
WHERE author_id = abs(author_id - 2);
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -1008,6 +1027,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash_mx
WHERE (author_id = 1) is true;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -1183,11 +1203,13 @@ DETAIL: distribution column value: 1
SELECT id, MIN(id) over (order by word_count)
FROM articles_hash_mx
WHERE author_id = 1 or author_id = 2;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: could not run distributed query because the window function that is used cannot be pushed down
HINT: Window functions are supported in two ways. Either add an equality filter on the distributed tables' partition column or use the window functions with a PARTITION BY clause containing the distribution column
SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
FROM articles_hash_mx
WHERE author_id = 5 or author_id = 2;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: could not run distributed query because the window function that is used cannot be pushed down
HINT: Window functions are supported in two ways. Either add an equality filter on the distributed tables' partition column or use the window functions with a PARTITION BY clause containing the distribution column
-- complex query hitting a single shard

@ -1227,6 +1249,7 @@ SELECT
GROUP BY
author_id
ORDER BY c;
DEBUG: Router planner cannot handle multi-shard select queries
c
---
4

@ -1329,6 +1352,7 @@ DETAIL: distribution column value: 1
SELECT count(*), count(*) FILTER (WHERE id < 3)
FROM articles_hash_mx
WHERE author_id = 1 or author_id = 2;
DEBUG: Router planner cannot handle multi-shard select queries
count | count
-------+-------
10 | 2

@ -1449,6 +1473,7 @@ DROP MATERIALIZED VIEW mv_articles_hash_mx;
SET client_min_messages to 'DEBUG2';
CREATE MATERIALIZED VIEW mv_articles_hash_mx_error AS
SELECT * FROM articles_hash_mx WHERE author_id in (1,2);
DEBUG: Router planner cannot handle multi-shard select queries

-- router planner/executor is disabled for task-tracker executor
-- following query is router plannable, but router planner is disabled
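The recurring HINT in this file names the fix: a single equality filter on the distribution column lets the router planner send the whole query to one shard. A sketch of the router-plannable counterpart to the multi-shard queries above (illustrative, reusing the articles_hash_mx fixture; the expected DEBUG lines are copied from the single-shard cases elsewhere in this file):

SELECT id, title FROM articles_hash_mx WHERE author_id = 1;
-- expected: "Creating router plan" followed by "Plan is router executable"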
@ -36,6 +36,7 @@ SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
-- Adding l_orderkey = 1 to make the query not router executable
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ]
QUERY PLAN
-----------------------------------------------------------------------

@ -65,6 +66,7 @@ LOG: join order: [ "lineitem" ]
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ][ local partition join "orders" ]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]

@ -98,6 +100,7 @@ DEBUG: join prunable for intervals [8997,14947] and [1,5986]
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ]
QUERY PLAN
-------------------------------------------------------------------------------

@ -117,6 +120,7 @@ LOG: join order: [ "lineitem" ]
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2

@ -168,6 +172,7 @@ HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ]
QUERY PLAN
-------------------------------------------------------------------------------

@ -187,6 +192,7 @@ LOG: join order: [ "lineitem" ]
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2

@ -238,6 +244,7 @@ HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ]
DEBUG: Plan is router executable
QUERY PLAN

@ -254,6 +261,7 @@ DEBUG: Plan is router executable
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
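These hunks toggle shardminvalue/shardmaxvalue to NULL and back precisely because pruning depends on them: a shard can only be skipped when its interval is known. A sketch of the interval test the DEBUG lines imply (illustrative pseudo-query over pg_dist_shard, whose columns are the ones updated above; Citus performs this pruning in C, not in SQL):

SELECT shardid
FROM pg_dist_shard
WHERE logicalrelid = 'lineitem'::regclass
  AND (shardminvalue IS NULL OR shardminvalue::bigint <= 9030)
  AND (shardmaxvalue IS NULL OR shardmaxvalue::bigint >= 9030);
-- a NULL bound keeps the shard in the candidate set, which is why the
-- router-executable plan only reappears once shardminvalue is set back to '0'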
@ -36,6 +36,7 @@ SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
-- Adding l_orderkey = 1 to make the query not router executable
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ]
QUERY PLAN
-----------------------------------------------------------------------

@ -65,6 +66,7 @@ LOG: join order: [ "lineitem" ]
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ][ local partition join "orders" ]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]

@ -98,6 +100,7 @@ DEBUG: join prunable for intervals [8997,14947] and [1,5986]
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ]
QUERY PLAN
-------------------------------------------------------------------------------

@ -117,6 +120,7 @@ LOG: join order: [ "lineitem" ]
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2

@ -168,6 +172,7 @@ HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ]
QUERY PLAN
-------------------------------------------------------------------------------

@ -187,6 +192,7 @@ LOG: join order: [ "lineitem" ]
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2

@ -238,6 +244,7 @@ HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ]
DEBUG: Plan is router executable
QUERY PLAN

@ -254,6 +261,7 @@ DEBUG: Plan is router executable
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
DEBUG: Router planner does not support append-partitioned tables.
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
@ -40,6 +40,7 @@ GROUP BY
l_partkey, o_orderkey
ORDER BY
l_partkey, o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: generated sql query for task 1

@ -131,6 +132,7 @@ GROUP BY
l_partkey, o_orderkey
ORDER BY
l_partkey, o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)"
DEBUG: generated sql query for task 2
@ -15,6 +15,7 @@ FROM
orders, customer_append
WHERE
o_custkey = c_custkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]

@ -44,6 +45,7 @@ FROM
orders, customer_append
WHERE
o_custkey = c_custkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]

@ -71,6 +73,7 @@ FROM
WHERE
o_custkey = c_custkey AND
o_orderkey < 0;
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
-------------------------------------------------------------------
Aggregate

@ -89,6 +92,7 @@ FROM
WHERE
o_custkey = c_custkey AND
o_orderkey < 0;
DEBUG: Router planner does not support append-partitioned tables.
count
-------
0

@ -104,6 +108,7 @@ FROM
WHERE
o_custkey = c_custkey AND
c_custkey < 0;
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
-------------------------------------------------------------------
Aggregate

@ -122,6 +127,7 @@ FROM
WHERE
o_custkey = c_custkey AND
c_custkey < 0;
DEBUG: Router planner does not support append-partitioned tables.
count
-------
0

@ -137,6 +143,7 @@ FROM
lineitem, customer_append
WHERE
l_partkey = c_nationkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3

@ -185,6 +192,7 @@ FROM
lineitem, customer_append
WHERE
l_partkey = c_nationkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3

@ -228,6 +236,7 @@ FROM
WHERE
l_partkey = c_nationkey AND
l_orderkey < 0;
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
-------------------------------------------------------------------
Aggregate

@ -249,6 +258,7 @@ FROM
WHERE
l_partkey = c_nationkey AND
l_orderkey < 0;
DEBUG: Router planner does not support append-partitioned tables.
count
-------
0

@ -262,6 +272,7 @@ FROM
orders INNER JOIN customer_append ON (o_custkey = c_custkey)
WHERE
false;
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
-------------------------------------------------------------
Custom Scan (Citus Task-Tracker)

@ -279,6 +290,7 @@ FROM
orders INNER JOIN customer_append ON (o_custkey = c_custkey)
WHERE
false;
DEBUG: Router planner does not support append-partitioned tables.
o_orderkey
------------
(0 rows)

@ -290,6 +302,7 @@ FROM
orders INNER JOIN customer_append ON (o_custkey = c_custkey)
WHERE
1=0 AND c_custkey < 0;
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
-------------------------------------------------------------
Custom Scan (Citus Task-Tracker)

@ -305,6 +318,7 @@ SELECT
o_orderkey
FROM
orders INNER JOIN customer_append ON (o_custkey = c_custkey AND false);
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
----------------------------------
Custom Scan (Citus Task-Tracker)

@ -319,6 +333,7 @@ FROM
orders, customer_append
WHERE
o_custkey = c_custkey AND false;
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
----------------------------------
Custom Scan (Citus Task-Tracker)
@ -25,6 +25,7 @@ FROM
orders, customer_append
WHERE
o_custkey = c_custkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]

@ -58,6 +59,7 @@ FROM
WHERE
o_custkey = c_custkey AND
o_orderkey = l_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637

@ -82,6 +84,7 @@ FROM
lineitem, customer_append
WHERE
l_partkey = c_nationkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
@ -256,6 +256,7 @@ DETAIL: distribution column value: 1
-- query is a single shard query but can't do shard pruning,
-- not router-plannable due to <= and IN
SELECT * FROM articles_hash WHERE author_id <= 1;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -341,6 +342,7 @@ DEBUG: Plan is router executable
WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1),
id_title AS (SELECT id, title from articles_hash WHERE author_id = 2)
SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
DEBUG: cannot run command which targets multiple shards
DEBUG: generating subplan 67_1 for CTE id_author: SELECT id, author_id FROM public.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 1)
DEBUG: Creating router plan
DEBUG: Plan is router executable

@ -436,6 +438,7 @@ WITH RECURSIVE hierarchy as (
ON (h.employee_id = ce.manager_id AND
h.company_id = ce.company_id))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: recursive CTEs are not supported in distributed queries
-- logically wrong query, query involves different shards
-- from the same table

@ -450,6 +453,7 @@ WITH RECURSIVE hierarchy as (
h.company_id = ce.company_id AND
ce.company_id = 2))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
DEBUG: cannot run command which targets multiple shards
ERROR: recursive CTEs are not supported in distributed queries
-- Test router modifying CTEs
WITH new_article AS (

@ -560,6 +564,7 @@ SELECT
WHERE author_id = 1 or author_id = 2
GROUP BY GROUPING SETS ((id),(subtitle))
ORDER BY id, subtitle;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP
HINT: Consider using an equality filter on the distributed table's partition column.
-- queries which involve functions in FROM clause are supported if it goes to a single worker.

@ -595,6 +600,7 @@ DEBUG: Plan is router executable

-- they are supported via (sub)query pushdown if multiple workers are involved
SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2 ORDER BY 4 DESC, 1 DESC, 2 DESC LIMIT 5;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 5
id | author_id | title | word_count | position
----+-----------+------------+------------+----------

@ -625,8 +631,11 @@ DETAIL: distribution column value: 2
SELECT articles_hash.id,test.word_count
FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test WHERE test.id = articles_hash.id
ORDER BY test.word_count DESC, articles_hash.id LIMIT 5;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 93_1 for subquery SELECT id, word_count FROM public.articles_hash
DEBUG: Plan 93 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM public.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('93_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash.id) ORDER BY test.word_count DESC, articles_hash.id LIMIT 5
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 5
id | word_count
----+------------

@ -641,6 +650,8 @@ SELECT articles_hash.id,test.word_count
FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test
WHERE test.id = articles_hash.id and articles_hash.author_id = 1
ORDER BY articles_hash.id;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 95_1 for subquery SELECT id, word_count FROM public.articles_hash
DEBUG: Plan 95 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM public.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('95_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash.id) AND (articles_hash.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash.id
DEBUG: Creating router plan

@ -658,6 +669,7 @@ DETAIL: distribution column value: 1
-- subqueries are not supported in SELECT clause
SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash a2 WHERE a.id = a2.id LIMIT 1)
AS special_price FROM articles_hash a;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: could not run distributed query with subquery outside the FROM and WHERE clauses
HINT: Consider using an equality filter on the distributed table's partition column.

@ -698,6 +710,7 @@ SELECT *
FROM articles_hash
WHERE author_id = 1 OR author_id = 18
ORDER BY 4 DESC, 3 DESC, 2 DESC, 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
41 | 1 | aznavour | 11814

@ -764,6 +777,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
WHERE a.author_id = 2 and a.author_id = b.author_id
LIMIT 3;
DEBUG: Found no worker with all shard placements
DEBUG: found no worker with all shard placements
DEBUG: generating subplan 104_1 for CTE single_shard: SELECT id, author_id, title, word_count FROM public.articles_single_shard_hash
DEBUG: Creating router plan
DEBUG: Plan is router executable

@ -1070,6 +1084,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash
WHERE author_id = 1 or id = 1;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -1108,6 +1123,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash
WHERE author_id = (random()::int * 0 + 1);
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -1121,6 +1137,7 @@ SELECT *
SELECT *
FROM articles_hash
WHERE author_id = 1 or id = 1;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -1151,6 +1168,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash
WHERE 1 = abs(author_id);
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -1164,6 +1182,7 @@ SELECT *
SELECT *
FROM articles_hash
WHERE author_id = abs(author_id - 2);
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -1189,6 +1208,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash
WHERE (author_id = 1) is true;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572

@ -1364,11 +1384,13 @@ DETAIL: distribution column value: 1
SELECT id, MIN(id) over (order by word_count)
FROM articles_hash
WHERE author_id = 1 or author_id = 2;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: could not run distributed query because the window function that is used cannot be pushed down
HINT: Window functions are supported in two ways. Either add an equality filter on the distributed tables' partition column or use the window functions with a PARTITION BY clause containing the distribution column
SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
FROM articles_hash
WHERE author_id = 5 or author_id = 2;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: could not run distributed query because the window function that is used cannot be pushed down
HINT: Window functions are supported in two ways. Either add an equality filter on the distributed tables' partition column or use the window functions with a PARTITION BY clause containing the distribution column
-- where false queries are router plannable

@ -1465,6 +1487,7 @@ DEBUG: Plan is router executable
SELECT *
FROM articles_hash a
WHERE a.author_id is null;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+-------+------------
(0 rows)

@ -1642,6 +1665,7 @@ SELECT author_id FROM articles_hash
ORDER BY
author_id
LIMIT 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 1
author_id
-----------

@ -1740,6 +1764,7 @@ DEBUG: Plan is router executable
SET citus.task_executor_type to "task-tracker";
SELECT * FROM articles_range ar join authors_range au on (ar.title = au.name)
WHERE ar.author_id = 35;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3

@ -1777,6 +1802,7 @@ DETAIL: Creating dependency on merge taskId 20
-- change the join columns.
SELECT * FROM articles_range ar join authors_range au on (ar.title = au.name)
WHERE ar.author_id = 1 or au.id = 5;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3

@ -1836,6 +1862,7 @@ DETAIL: distribution column value: 2
SELECT * FROM articles_hash ar join authors_range au on (ar.author_id = au.id)
WHERE ar.author_id = 3;
DEBUG: Found no worker with all shard placements
DEBUG: found no worker with all shard placements
DEBUG: join prunable for intervals [1,10] and [11,30]
DEBUG: join prunable for intervals [1,10] and [21,40]
DEBUG: join prunable for intervals [1,10] and [31,40]

@ -1883,6 +1910,7 @@ DEBUG: Plan is router executable
-- it is not router plannable if hit multiple shards
SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.id)
WHERE ar.author_id = 1 or ar.author_id = 15;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count | name | id
----+-----------+-------+------------+------+----
(0 rows)

@ -1906,6 +1934,7 @@ SELECT author_id FROM articles_append
ORDER BY
author_id
LIMIT 1;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: push down of limit count: 1
WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638

@ -1921,6 +1950,7 @@ SELECT author_id FROM articles_append
ORDER BY
author_id
LIMIT 1;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: push down of limit count: 1
WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638

@ -1969,6 +1999,7 @@ SELECT * FROM articles_hash
ORDER BY
author_id, id
LIMIT 5;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 5
id | author_id | title | word_count
----+-----------+--------------+------------

@ -2033,6 +2064,7 @@ SELECT
GROUP BY
author_id
ORDER BY c;
DEBUG: Router planner cannot handle multi-shard select queries
c
---
4

@ -2163,6 +2195,7 @@ DETAIL: distribution column value: 1
SELECT count(*), count(*) FILTER (WHERE id < 3)
FROM articles_hash
WHERE author_id = 1 or author_id = 2;
DEBUG: Router planner cannot handle multi-shard select queries
count | count
-------+-------
10 | 2

@ -2280,6 +2313,7 @@ SELECT * FROM mv_articles_hash_empty;

CREATE MATERIALIZED VIEW mv_articles_hash_data AS
SELECT * FROM articles_hash WHERE author_id in (1,2);
DEBUG: Router planner cannot handle multi-shard select queries
SELECT * FROM mv_articles_hash_data ORDER BY 1, 2, 3, 4;
id | author_id | title | word_count
----+-----------+--------------+------------
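Per the window-function HINT repeated above, the second supported form keeps the distribution column in PARTITION BY, so every shard can evaluate its window locally. A sketch of that rewrite of the failing query (illustrative only; not output from this diff):

SELECT id, MIN(id) OVER (PARTITION BY author_id ORDER BY word_count)
FROM articles_hash
WHERE author_id = 1 or author_id = 2;
-- with author_id in PARTITION BY, the window function is expected to be
-- pushed down instead of raising the error shown above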
@ -162,6 +162,7 @@ DETAIL: distribution column value: 1
|
|||
|
||||
-- fast path planner only support = operator
|
||||
SELECT * FROM articles_hash WHERE author_id <= 1;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
id | author_id | title | word_count
|
||||
----+-----------+--------------+------------
|
||||
1 | 1 | arsenous | 9572
|
||||
|
@ -224,6 +225,7 @@ DETAIL: distribution column value: 1
|
|||
WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1),
|
||||
id_title AS (SELECT id, title from articles_hash WHERE author_id = 2)
|
||||
SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
|
||||
DEBUG: cannot run command which targets multiple shards
|
||||
DEBUG: generating subplan 12_1 for CTE id_author: SELECT id, author_id FROM fast_path_router_select.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 1)
|
||||
DEBUG: Distributed planning for a fast-path router query
|
||||
DEBUG: Creating router plan
|
||||
|
@ -378,6 +380,7 @@ SELECT
|
|||
WHERE author_id = 1 or author_id = 2
|
||||
GROUP BY GROUPING SETS ((id),(subtitle))
|
||||
ORDER BY id, subtitle;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP
|
||||
HINT: Consider using an equality filter on the distributed table's partition column.
|
||||
-- queries which involve functions in FROM clause are not supported via fast path planning
|
||||
|
@ -414,8 +417,11 @@ DETAIL: distribution column value: 2
|
|||
SELECT articles_hash.id,test.word_count
|
||||
FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test WHERE test.id = articles_hash.id
|
||||
ORDER BY test.word_count DESC, articles_hash.id LIMIT 5;
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: Router planner cannot handle multi-shard select queries
|
||||
DEBUG: generating subplan 32_1 for subquery SELECT id, word_count FROM fast_path_router_select.articles_hash
|
||||
DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM fast_path_router_select.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash.id) ORDER BY test.word_count DESC, articles_hash.id LIMIT 5
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 5
id | word_count
----+------------
@@ -430,6 +436,8 @@ SELECT articles_hash.id,test.word_count
FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test
WHERE test.id = articles_hash.id and articles_hash.author_id = 1
ORDER BY articles_hash.id;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 34_1 for subquery SELECT id, word_count FROM fast_path_router_select.articles_hash
DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM fast_path_router_select.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE ((test.id OPERATOR(pg_catalog.=) articles_hash.id) AND (articles_hash.author_id OPERATOR(pg_catalog.=) 1)) ORDER BY articles_hash.id
DEBUG: Creating router plan
@@ -447,6 +455,7 @@ DETAIL: distribution column value: 1
-- subqueries are not supported in SELECT clause
SELECT a.title AS name, (SELECT a2.id FROM articles_hash a2 WHERE a.id = a2.id LIMIT 1)
AS special_price FROM articles_hash a;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: could not run distributed query with subquery outside the FROM and WHERE clauses
HINT: Consider using an equality filter on the distributed table's partition column.
@@ -751,6 +760,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash
WHERE author_id = 1 or id = 1;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572
@@ -782,6 +792,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash
WHERE author_id = 1 and id = 1 or id = 41;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+----------+------------
1 | 1 | arsenous | 9572
@@ -806,6 +817,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash
WHERE author_id = (random()::int * 0 + 1);
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572
@@ -838,6 +850,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash
WHERE 1 = abs(author_id);
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572
@@ -852,6 +865,7 @@ SELECT *
SELECT *
FROM articles_hash
WHERE author_id = abs(author_id - 2);
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572
@@ -879,6 +893,7 @@ DETAIL: distribution column value: 1
SELECT *
FROM articles_hash
WHERE (author_id = 1) is true;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572
@@ -916,18 +931,21 @@ DETAIL: distribution column value: 15
(1 row)

SELECT count(*) FROM articles_hash WHERE (author_id = 15) OR (id = 1 AND word_count > 5);
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
1
(1 row)

SELECT count(*) FROM articles_hash WHERE (id = 15) OR (author_id = 1 AND word_count > 5);
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
6
(1 row)

SELECT count(*) FROM articles_hash WHERE (id = 15) AND (author_id = 1 OR word_count > 5);
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
1
@@ -944,6 +962,7 @@ DETAIL: distribution column value: 1
(1 row)

SELECT count(*) FROM articles_hash WHERE (id = 15) AND (title ilike 'a%' AND (word_count > 5 OR author_id = 2));
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
1
@@ -1229,6 +1248,7 @@ DETAIL: distribution column value: 10
SELECT *
FROM articles_hash a
WHERE a.author_id is null;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+-------+------------
(0 rows)
@@ -1604,6 +1624,7 @@ DETAIL: distribution column value: 1
(5 rows)

EXECUTE author_articles(1);
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -1728,6 +1749,7 @@ DEBUG: Plan is router executable
(1 row)

SELECT author_articles_max_id(1);
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -1887,6 +1909,7 @@ DEBUG: Plan is router executable
(5 rows)

SELECT * FROM author_articles_id_word_count(1);
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -2006,6 +2029,7 @@ DETAIL: distribution column value: 5
(1 row)

EXECUTE fast_path_agg_filter(6,6);
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable

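The hunks above all share one shape: the router planner runs first, logs why it cannot handle the query, and planning falls back to the fast-path or recursive planner. A minimal sketch for surfacing this kind of DEBUG output in a psql session, assuming a hash-distributed table like the articles_hash used in these tests:

SET client_min_messages TO DEBUG2;
-- prunes to a single shard, so the fast-path router planner handles it
SELECT count(*) FROM articles_hash WHERE author_id = 1;
-- touches several shards, so the router planner logs its refusal and planning falls back
SELECT count(*) FROM articles_hash WHERE author_id = 1 OR author_id = 18;
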
@@ -447,6 +447,7 @@ DEBUG: Plan is router executable
SELECT *
FROM articles
WHERE author_id = 1 OR author_id = 18;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572
@@ -573,6 +574,7 @@ DETAIL: distribution column value: 2
SELECT *
FROM articles a, articles b
WHERE a.id = b.id AND a.author_id = 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@@ -614,6 +616,7 @@ SELECT count(*) FROM (
xmin IS NOT NULL OR
xmax IS NOT NULL
) x;
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
50

@@ -391,6 +391,7 @@ DEBUG: Plan is router executable
SELECT *
FROM articles
WHERE author_id = 1 OR author_id = 18;
DEBUG: Router planner cannot handle multi-shard select queries
id | author_id | title | word_count
----+-----------+--------------+------------
1 | 1 | arsenous | 9572
@@ -517,6 +518,7 @@ DETAIL: distribution column value: 2
SELECT *
FROM articles a, articles b
WHERE a.id = b.id AND a.author_id = 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@@ -558,6 +560,7 @@ SELECT count(*) FROM (
xmin IS NOT NULL OR
xmax IS NOT NULL
) x;
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
50

@@ -80,6 +80,8 @@ SELECT count(*) FROM
(SELECT l_orderkey FROM lineitem_subquery) UNION ALL
(SELECT 1::bigint)
) b;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 7_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -98,7 +100,10 @@ SELECT count(*) FROM
(SELECT l_orderkey FROM lineitem_subquery) UNION
(SELECT l_partkey FROM lineitem_subquery)
) b;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 10_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 10_2 for subquery SELECT l_partkey FROM public.lineitem_subquery
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -117,6 +122,7 @@ SELECT count(*) FROM
(SELECT l_orderkey FROM lineitem_subquery) UNION
(SELECT l_orderkey FROM lineitem_subquery)
) b;
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
2985
@@ -826,6 +832,7 @@ SET client_min_messages TO DEBUG2;
SELECT * FROM
(SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE a = 'onder' GROUP BY a)
AS foo;
DEBUG: Router planner not enabled.
count
-------
(0 rows)
@@ -833,6 +840,7 @@ AS foo;
SELECT * FROM
(SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE 'eren' = a GROUP BY a)
AS foo;
DEBUG: Router planner not enabled.
count
-------
(0 rows)

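Each "generating subplan N_M" line above marks a subquery that Citus executes on its own and materializes as an intermediate result; the rewritten outer query then reads it back through read_intermediate_result(), which requires an explicit column definition list. A sketch of what the rewritten UNION above looks like, assuming bigint key columns as in this TPC-H-style schema (result ids such as '10_1' exist only while the parent plan is executing, so this fragment is illustrative rather than standalone-runnable):

SELECT count(*) FROM (
    SELECT intermediate_result.l_orderkey
    FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format)
         intermediate_result(l_orderkey bigint)
    UNION
    SELECT intermediate_result.l_partkey
    FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format)
         intermediate_result(l_partkey bigint)
) b;
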
@@ -1499,8 +1499,11 @@ FROM
ORDER BY
user_id DESC, lastseen DESC
LIMIT 10;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 53_1 for subquery SELECT user_id FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3))
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: cannot push down this subquery
DETAIL: Limit in subquery is currently unsupported when a subquery references a column from another query

@@ -281,8 +281,10 @@ SET client_min_messages TO DEBUG;
SELECT count(*) FROM
(SELECT random() FROM user_buy_test_table JOIN random() AS users_ref_test_table(id)
ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 30_1 for subquery SELECT id FROM random() users_ref_test_table(id)
DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT random() AS random FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(id double precision)) users_ref_test_table(id) ON (((user_buy_test_table.item_id)::double precision OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
4
@@ -293,8 +295,10 @@ SELECT count(*) FROM
(SELECT item_id FROM user_buy_test_table JOIN generate_series(random()::int,10) AS users_ref_test_table(id)
ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1
WHERE item_id = 6;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 31_1 for subquery SELECT id FROM generate_series((random())::integer, 10) users_ref_test_table(id)
DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT user_buy_test_table.item_id FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) users_ref_test_table(id) ON ((user_buy_test_table.item_id OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1 WHERE (item_id OPERATOR(pg_catalog.=) 6)
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
0
@@ -305,6 +309,8 @@ SELECT count(*) FROM
(SELECT user_id FROM user_buy_test_table
UNION ALL
SELECT id FROM generate_series(1,10) AS users_ref_test_table(id)) subquery_1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 32_1 for subquery SELECT user_id FROM public.user_buy_test_table
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -355,6 +361,8 @@ SELECT count(*) FROM
(SELECT user_id FROM user_buy_test_table
UNION ALL
SELECT id FROM (SELECT 5 AS id) users_ref_test_table) subquery_1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 39_1 for subquery SELECT user_id FROM public.user_buy_test_table
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -375,6 +383,8 @@ SELECT * FROM
UNION
SELECT user_id FROM user_buy_test_table) sub
ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 42_1 for subquery SELECT user_id FROM public.user_buy_test_table
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -399,6 +409,8 @@ SELECT * FROM
UNION
SELECT user_id, random() * 0 FROM (SELECT user_id FROM user_buy_test_table) sub2) sub
ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 45_1 for subquery SELECT user_id, (random() OPERATOR(pg_catalog.*) (0)::double precision) FROM (SELECT user_buy_test_table.user_id FROM public.user_buy_test_table) sub2
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -425,6 +437,7 @@ SELECT * FROM
UNION
SELECT user_id FROM user_buy_test_table) sub
ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
user_id
---------
7
@@ -440,6 +453,7 @@ SELECT * FROM
UNION
SELECT user_id, random() * 0 FROM (SELECT user_id FROM user_buy_test_table) sub2) sub
ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
user_id | ?column?
---------+----------
7 | 0
@@ -454,6 +468,7 @@ SELECT * FROM
UNION
SELECT user_id FROM user_buy_test_table) sub
ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot pushdown the subquery
DETAIL: There exist a reference table in the outer part of the outer join
SELECT * FROM
@@ -462,6 +477,7 @@ SELECT * FROM
UNION
SELECT user_id FROM user_buy_test_table WHERE user_id in (select id from users_ref_test_table)) sub
ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
user_id
---------
3
@@ -478,6 +494,7 @@ SELECT * FROM
UNION
SELECT user_id FROM user_buy_test_table) sub
ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
user_id
---------
7
@@ -495,6 +512,7 @@ SELECT * FROM
UNION
SELECT user_id FROM user_buy_test_table) sub
ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot pushdown the subquery
DETAIL: There exist a reference table in the outer part of the outer join
RESET client_min_messages;

@@ -77,6 +77,7 @@ SET client_min_messages TO DEBUG3;
-- First test the default greedy task assignment policy
SET citus.task_assignment_policy TO 'greedy';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
@@ -88,6 +89,7 @@ DEBUG: assigned task 2 to node localhost:57637
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
@@ -101,6 +103,7 @@ DEBUG: assigned task 2 to node localhost:57637
-- Next test the first-replica task assignment policy
SET citus.task_assignment_policy TO 'first-replica';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
@@ -112,6 +115,7 @@ DEBUG: assigned task 1 to node localhost:57638
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638

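Both hunks above follow the same recipe: flip the citus.task_assignment_policy GUC, re-run EXPLAIN, and watch which worker each task is assigned to. Condensed from the tests, a sketch of the sequence being exercised:

SET client_min_messages TO DEBUG3;
SET citus.task_assignment_policy TO 'greedy';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
SET citus.task_assignment_policy TO 'first-replica';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
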
@@ -931,11 +931,14 @@ JOIN LATERAL
FROM events_table
WHERE user_id = users_table.user_id) AS bar
LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 99_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table OFFSET 0
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: Plan 99 query after replacing subqueries and CTEs: SELECT users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4, foo.user_id, foo."time", foo.event_type, foo.value_2, foo.value_3, foo.value_4, foo.user_id_1 AS user_id, foo.time_1 AS "time", foo.value_1, foo.value_2_1 AS value_2, foo.value_3_1 AS value_3, foo.value_4_1 AS value_4 FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('99_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) users_table JOIN LATERAL (SELECT bar.user_id, bar."time", bar.event_type, bar.value_2, bar.value_3, bar.value_4, u2.user_id, u2."time", u2.value_1, u2.value_2, u2.value_3, u2.value_4 FROM ((SELECT events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)) bar LEFT JOIN public.users_table u2 ON ((u2.user_id OPERATOR(pg_catalog.=) bar.value_2)))) foo(user_id, "time", event_type, value_2, value_3, value_4, user_id_1, time_1, value_1, value_2_1, value_3_1, value_4_1) ON (true))
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
@@ -954,6 +957,7 @@ JOIN LATERAL
WHERE user_id = users_table.user_id) AS bar
LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE
$$);
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
@@ -986,17 +990,23 @@ JOIN LATERAL
) AS events_table
WHERE user_id = users_table_limited.user_id) AS bar
LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 102_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 102_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2)
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 102_3 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) EXCEPT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 102_4 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_3 OPERATOR(pg_catalog.>) (4)::double precision)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 102_5 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.>) 2)
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 102_6 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) INTERSECT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)
DEBUG: Plan 102 query after replacing subqueries and CTEs: SELECT users_table_limited.user_id, users_table_limited."time", users_table_limited.value_1, users_table_limited.value_2, users_table_limited.value_3, users_table_limited.value_4, foo.user_id, foo."time", foo.event_type, foo.value_2, foo.value_3, foo.value_4, foo.user_id_1 AS user_id, foo.time_1 AS "time", foo.value_1, foo.value_2_1 AS value_2, foo.value_3_1 AS value_3, foo.value_4_1 AS value_4 FROM ((SELECT users_table_union.user_id, users_table_union."time", users_table_union.value_1, users_table_union.value_2, users_table_union.value_3, users_table_union.value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) users_table_union) users_table_limited JOIN LATERAL (SELECT bar.user_id, bar."time", bar.event_type, bar.value_2, bar.value_3, bar.value_4, u2.user_id, u2."time", u2.value_1, u2.value_2, u2.value_3, u2.value_4 FROM ((SELECT events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('102_6'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table_limited.user_id)) bar LEFT JOIN public.users_table u2 ON ((u2.user_id OPERATOR(pg_catalog.=) bar.value_2)))) foo(user_id, "time", event_type, value_2, value_3, value_4, user_id_1, time_1, value_1, value_2_1, value_3_1, value_4_1) ON (true))
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot pushdown the subquery
-- similar to the above, but this time there are multiple
-- non-colocated subquery joins one of them contains lateral
@@ -1027,16 +1037,22 @@ SELECT count(*) FROM events_table WHERE user_id NOT IN
WHERE user_id = users_table_limited.user_id) AS bar
LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE
);
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 109_1 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 109_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2)
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 109_3 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('109_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint) EXCEPT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('109_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 109_4 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_3 OPERATOR(pg_catalog.>) (4)::double precision)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 109_5 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.>) 2)
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 109_6 for subquery SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('109_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint) INTERSECT SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('109_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot pushdown the subquery
RESET client_min_messages;
DROP FUNCTION explain_json_2(text);

@@ -41,7 +41,6 @@ FROM
) as foo
WHERE
foo.avg_tenant_id::int::text = reference_table.id;
DEBUG: only reference tables may be queried when targeting a reference table with multi shard UPDATE/DELETE queries with multiple tables
DEBUG: generating subplan 4_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM recursive_dml_queries_mx.second_distributed_table
DEBUG: Plan 4 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries_mx.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id)
-- the subquery foo is recursively planned
@@ -68,7 +67,6 @@ FROM
WHERE
foo.tenant_id != second_distributed_table.tenant_id
AND second_distributed_table.dept IN (2);
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries_mx.second_distributed_table, recursive_dml_queries_mx.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC
DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries_mx.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2))
-- run some queries from worker nodes

@@ -33,8 +33,6 @@ SET dept = foo.dept FROM
(SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4) OFFSET 0) as bar
WHERE foo.tenant_id = bar.tenant_id
AND distributed_table.tenant_id = bar.tenant_id;
DEBUG: cannot push down this subquery
DETAIL: Offset clause is currently unsupported when a subquery references a column from another query
DEBUG: generating subplan 3_1 for subquery SELECT tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) OFFSET 0
DEBUG: Plan 3 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.dept FROM (SELECT second_distributed_table.tenant_id, second_distributed_table.dept FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.tenant_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) bar WHERE ((foo.tenant_id OPERATOR(pg_catalog.=) bar.tenant_id) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) bar.tenant_id))
-- a non colocated subquery inside the UPDATE
@@ -47,7 +45,6 @@ UPDATE distributed_table SET dept = foo.max_dept FROM
WHERE tenant_id NOT IN
(SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4))
) as foo WHERE foo.max_dept > dept * 3;
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
DEBUG: generating subplan 5_1 for subquery SELECT tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))
DEBUG: generating subplan 5_2 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (NOT (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.tenant_id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text))))
DEBUG: Plan 5 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE (foo.max_dept OPERATOR(pg_catalog.>) (distributed_table.dept OPERATOR(pg_catalog.*) 3))
@@ -59,7 +56,6 @@ UPDATE distributed_table SET dept = foo.some_tenants::int FROM
DISTINCT second_distributed_table.tenant_id as some_tenants
FROM second_distributed_table, distributed_table WHERE second_distributed_table.dept = distributed_table.dept
) as foo;
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
DEBUG: generating subplan 8_1 for subquery SELECT DISTINCT second_distributed_table.tenant_id AS some_tenants FROM recursive_dml_with_different_planner_executors.second_distributed_table, recursive_dml_with_different_planner_executors.distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) distributed_table.dept)
@@ -75,7 +71,6 @@ UPDATE distributed_table SET dept = foo.max_dept FROM
WHERE tenant_id IN
(SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4))
) as foo WHERE foo.max_dept >= dept and tenant_id = '8';
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
DEBUG: generating subplan 10_1 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT second_distributed_table.tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))))
DEBUG: Plan 10 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE ((foo.max_dept OPERATOR(pg_catalog.>=) distributed_table.dept) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) '8'::text))
RESET client_min_messages;

@@ -21,7 +21,9 @@ INSERT INTO local_test VALUES (3,3), (4,4);
SET client_min_messages TO DEBUG;
-- we should be able to run set operations with local tables
(SELECT x FROM test) INTERSECT (SELECT x FROM local_test) ORDER BY 1 DESC;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 3_1 for subquery SELECT x FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 3_2 for subquery SELECT x FROM recursive_set_local.test
DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC
DEBUG: Creating router plan
@@ -32,6 +34,8 @@ DEBUG: Plan is router executable

-- we should be able to run set operations with generate series
(SELECT x FROM test) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 5_1 for subquery SELECT x FROM recursive_set_local.test
DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC
DEBUG: Creating router plan
@@ -45,6 +49,8 @@ DEBUG: Plan is router executable
-- we'd first recursively plan the query with "test", thus don't need to recursively
-- plan other query
(SELECT x FROM test LIMIT 5) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 5
DEBUG: generating subplan 7_1 for subquery SELECT x FROM recursive_set_local.test LIMIT 5
DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC
@@ -68,6 +74,8 @@ DEBUG: Plan is router executable

-- same query with a failure on the worker (i.e., division by zero)
(SELECT x FROM test) INTERSECT (SELECT i/0 FROM generate_series(0, 100) i) ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 10_1 for subquery SELECT x FROM recursive_set_local.test
DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT (i.i OPERATOR(pg_catalog./) 0) FROM generate_series(0, 100) i(i) ORDER BY 1 DESC
DEBUG: Creating router plan
@@ -76,7 +84,9 @@ ERROR: division by zero
CONTEXT: while executing command on localhost:57637
-- we should be able to run set operations with generate series and local tables as well
((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 12_1 for subquery SELECT x FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 12_2 for subquery SELECT x FROM recursive_set_local.test
DEBUG: Plan 12 query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('12_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC
DEBUG: Creating router plan
@@ -91,8 +101,10 @@ DEBUG: Plan is router executable

-- two local tables are on different leaf queries, so safe to plan & execute
((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT x FROM local_test) ORDER BY 1 DESC;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 14_1 for subquery SELECT x FROM recursive_set_local.local_test
DEBUG: generating subplan 14_2 for subquery SELECT x FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 14_3 for subquery SELECT x FROM recursive_set_local.test
DEBUG: Plan 14 query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC
DEBUG: Creating router plan
@@ -109,8 +121,11 @@ cte_1 AS (SELECT user_id FROM users_table),
cte_2 AS (SELECT user_id FROM events_table)
((SELECT * FROM cte_1) UNION (SELECT * FROM cte_2) UNION (SELECT x FROM local_test)) INTERSECT (SELECT i FROM generate_series(0, 100) i)
ORDER BY 1 DESC;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 16_1 for CTE cte_1: SELECT user_id FROM public.users_table
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 16_2 for CTE cte_2: SELECT user_id FROM public.events_table
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 16_3 for subquery SELECT x FROM recursive_set_local.local_test
DEBUG: Plan 16 query after replacing subqueries and CTEs: (SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 UNION SELECT intermediate_result.x FROM read_intermediate_result('16_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC
DEBUG: Creating router plan
@@ -137,7 +152,9 @@ FROM
) as foo,
test
WHERE test.y = foo.x;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 19_1 for CTE cte_1: SELECT x FROM recursive_set_local.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 19_2 for CTE cte_1: SELECT a FROM recursive_set_local.ref
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
@@ -147,6 +164,7 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 19_4 for subquery (SELECT cte_1.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte_1 UNION SELECT cte_1.a FROM (SELECT intermediate_result.a FROM read_intermediate_result('19_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte_1) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('19_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)
DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('19_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo, recursive_set_local.test WHERE (test.y OPERATOR(pg_catalog.=) foo.x)
DEBUG: Router planner cannot handle multi-shard select queries
count
-------
0
@@ -164,7 +182,9 @@ FROM
) as foo,
ref
WHERE ref.a = foo.x;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 23_1 for CTE cte_1: SELECT x FROM recursive_set_local.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 23_2 for CTE cte_1: SELECT a FROM recursive_set_local.ref
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
@@ -183,13 +203,17 @@ DEBUG: Plan is router executable

-- subquery union in WHERE clause without parition column equality is recursively planned including the local tables
SELECT * FROM test a WHERE x IN (SELECT x FROM test b UNION SELECT y FROM test c UNION SELECT y FROM local_test d) ORDER BY 1,2;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 27_1 for subquery SELECT y FROM recursive_set_local.local_test d
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 27_2 for subquery SELECT x FROM recursive_set_local.test b
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 27_3 for subquery SELECT y FROM recursive_set_local.test c
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 27_4 for subquery SELECT intermediate_result.x FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('27_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer)
DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('27_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -198,14 +222,19 @@ DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT x, y FROM recu

-- same query with subquery in where is wrapped in CTE
SELECT * FROM test a WHERE x IN (WITH cte AS (SELECT x FROM test b UNION SELECT y FROM test c UNION SELECT y FROM local_test d) SELECT * FROM cte) ORDER BY 1,2;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 31_1 for CTE cte: SELECT b.x FROM recursive_set_local.test b UNION SELECT c.y FROM recursive_set_local.test c UNION SELECT d.y FROM recursive_set_local.local_test d
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 32_1 for subquery SELECT y FROM recursive_set_local.local_test d
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 32_2 for subquery SELECT x FROM recursive_set_local.test b
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 32_3 for subquery SELECT y FROM recursive_set_local.test c
DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('32_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('32_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer)
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT cte.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte)) ORDER BY x, y
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -214,34 +243,44 @@ DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT x, y FROM recu

-- not supported since local table is joined with a set operation
SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM test ORDER BY x LIMIT 1)) u JOIN local_test USING (x) ORDER BY 1,2;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 1
DEBUG: generating subplan 35_1 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 35_2 for subquery SELECT x, y FROM recursive_set_local.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 35_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('35_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('35_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT u.x, u.y, local_test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('35_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_set_local.local_test USING (x)) ORDER BY u.x, u.y
DEBUG: Local tables cannot be used in distributed queries.
ERROR: relation local_test is not distributed
-- though we replace some queries including the local query, the intermediate result is on the outer part of an outer join
SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test ORDER BY x LIMIT 1)) u LEFT JOIN test USING (x) ORDER BY 1,2;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 39_1 for subquery SELECT x, y FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 1
DEBUG: generating subplan 39_2 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 39_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('39_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u LEFT JOIN recursive_set_local.test USING (x)) ORDER BY u.x, u.y
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot pushdown the subquery
DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join
-- we replace some queries including the local query, the intermediate result is on the inner part of an outer join
SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test ORDER BY x LIMIT 1)) u RIGHT JOIN test USING (x) ORDER BY 1,2;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 42_1 for subquery SELECT x, y FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 1
DEBUG: generating subplan 42_2 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 42_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT test.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u RIGHT JOIN recursive_set_local.test USING (x)) ORDER BY test.x, u.y
DEBUG: Router planner cannot handle multi-shard select queries
x | y | y
---+---+---
1 | | 1
@@ -250,24 +289,30 @@ DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT test.x, u.y, t

-- recurively plan left part of the join, and run a final real-time query
SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test ORDER BY x LIMIT 1)) u INNER JOIN test USING (x) ORDER BY 1,2;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 45_1 for subquery SELECT x, y FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 1
DEBUG: generating subplan 45_2 for subquery SELECT x, y FROM recursive_set_local.test ORDER BY x LIMIT 1
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 45_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
DEBUG: Plan 45 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_set_local.test USING (x)) ORDER BY u.x, u.y
DEBUG: Router planner cannot handle multi-shard select queries
x | y | y
---+---+---
(0 rows)

-- set operations and the sublink can be recursively planned
SELECT * FROM ((SELECT x FROM test) UNION (SELECT x FROM (SELECT x FROM local_test) as foo WHERE x IN (SELECT x FROM test))) u ORDER BY 1;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: generating subplan 48_1 for subquery SELECT x FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 48_2 for subquery SELECT x FROM recursive_set_local.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 48_3 for subquery SELECT x FROM (SELECT intermediate_result.x FROM read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('48_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer)))
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 48_4 for subquery SELECT x FROM recursive_set_local.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -284,6 +329,8 @@ DEBUG: Plan is router executable
SET citus.enable_repartition_joins TO ON;
-- repartition is recursively planned before the set operation
(SELECT x FROM test) INTERSECT (SELECT t1.x FROM test as t1, test as t2 WHERE t1.x = t2.y LIMIT 2) INTERSECT (((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT i FROM generate_series(0, 100) i)) ORDER BY 1 DESC;
DEBUG: Local tables cannot be used in distributed queries.
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 2
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
@@ -317,7 +364,9 @@ DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
DEBUG: generating subplan 53_1 for subquery SELECT t1.x FROM recursive_set_local.test t1, recursive_set_local.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 2
DEBUG: generating subplan 53_2 for subquery SELECT x FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 53_3 for subquery SELECT x FROM recursive_set_local.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 53_4 for subquery SELECT x FROM recursive_set_local.test
DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('53_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('53_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT ((SELECT intermediate_result.x FROM read_intermediate_result('53_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('53_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i)) ORDER BY 1 DESC
DEBUG: Creating router plan

@ -19,7 +19,10 @@ INSERT INTO ref VALUES (2,2), (3,3);
|
|||
-- top-level set operations are supported through recursive planning
SET client_min_messages TO DEBUG;
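(Editor's sketch: the DDL for this test file sits above the hunk and is not shown here. A minimal, hypothetical setup consistent with the output below — a distributed table test(x, y) sharded on x and a reference table ref(a, b) — would look roughly like this; create_distributed_table and create_reference_table are the standard Citus APIs.)

-- hypothetical setup sketch; the file's real DDL is outside this hunk
CREATE TABLE recursive_union.test (x int, y int);
SELECT create_distributed_table('recursive_union.test', 'x');
CREATE TABLE recursive_union.ref (a int, b int);
SELECT create_reference_table('recursive_union.ref');
INSERT INTO recursive_union.test VALUES (1,1), (2,2);
INSERT INTO recursive_union.ref VALUES (2,2), (3,3);
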
(SELECT * FROM test) UNION (SELECT * FROM test) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 3_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 3_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2
DEBUG: Creating router plan
@@ -31,6 +34,8 @@ DEBUG: Plan is router executable
(2 rows)

(SELECT * FROM test) UNION (SELECT * FROM ref) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 6_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2
DEBUG: Creating router plan
@@ -52,7 +57,10 @@ DEBUG: Plan is router executable
(2 rows)

(SELECT * FROM test) UNION ALL (SELECT * FROM test) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 9_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 9_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('9_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2
DEBUG: Creating router plan
@@ -66,6 +74,8 @@ DEBUG: Plan is router executable
(4 rows)

(SELECT * FROM test) UNION ALL (SELECT * FROM ref) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 12_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2
DEBUG: Creating router plan
@@ -90,7 +100,10 @@ DEBUG: Plan is router executable
(4 rows)

(SELECT * FROM test) INTERSECT (SELECT * FROM test) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 15_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 15_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('15_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('15_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2
DEBUG: Creating router plan
@@ -102,6 +115,8 @@ DEBUG: Plan is router executable
(2 rows)

(SELECT * FROM test) INTERSECT (SELECT * FROM ref) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 18_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2
DEBUG: Creating router plan
@@ -121,7 +136,10 @@ DEBUG: Plan is router executable
(2 rows)

(SELECT * FROM test) INTERSECT ALL (SELECT * FROM test) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 21_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 21_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2
DEBUG: Creating router plan
@@ -133,6 +151,8 @@ DEBUG: Plan is router executable
(2 rows)

(SELECT * FROM test) INTERSECT ALL (SELECT * FROM ref) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 24_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2
DEBUG: Creating router plan
@@ -152,7 +172,10 @@ DEBUG: Plan is router executable
(2 rows)

(SELECT * FROM test) EXCEPT (SELECT * FROM test) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 27_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 27_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2
DEBUG: Creating router plan
@@ -162,6 +185,8 @@ DEBUG: Plan is router executable
(0 rows)

(SELECT * FROM test) EXCEPT (SELECT * FROM ref) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 30_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2
DEBUG: Creating router plan
@@ -179,7 +204,10 @@ DEBUG: Plan is router executable
(0 rows)

(SELECT * FROM test) EXCEPT ALL (SELECT * FROM test) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 33_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 33_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('33_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2
DEBUG: Creating router plan
@@ -189,6 +217,8 @@ DEBUG: Plan is router executable
(0 rows)

(SELECT * FROM test) EXCEPT ALL (SELECT * FROM ref) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 36_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 36 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('36_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2
DEBUG: Creating router plan
@@ -216,7 +246,10 @@ EXCEPT
UNION
(SELECT test.x, ref.a FROM test LEFT JOIN ref ON (x = a))
ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 39_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 39_2 for subquery SELECT test.x, ref.a FROM (recursive_union.test LEFT JOIN recursive_union.ref ON ((test.x OPERATOR(pg_catalog.=) ref.a)))
DEBUG: Plan 39 query after replacing subqueries and CTEs: (((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref) UNION ALL SELECT s.s, s.s FROM generate_series(1, 10) s(s)) EXCEPT SELECT 1, 1) UNION SELECT intermediate_result.x, intermediate_result.a FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, a integer) ORDER BY 1, 2
DEBUG: Creating router plan
@@ -237,6 +270,7 @@ DEBUG: Plan is router executable

-- within a subquery, some unions can be pushed down
SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -244,7 +278,10 @@ SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u ORDER BY 1,2;
(2 rows)

SELECT * FROM ((SELECT x, y FROM test) UNION (SELECT y, x FROM test)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 43_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 43_2 for subquery SELECT y, x FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -259,6 +296,8 @@ DEBUG: Plan is router executable
(2 rows)

SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM ref)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 47_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -283,6 +322,7 @@ DEBUG: Plan is router executable
(2 rows)

SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -292,6 +332,7 @@ SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u ORDER BY 1
(4 rows)

SELECT * FROM ((SELECT x, y FROM test) UNION ALL (SELECT y, x FROM test)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -301,6 +342,8 @@ SELECT * FROM ((SELECT x, y FROM test) UNION ALL (SELECT y, x FROM test)) u ORDE
(4 rows)

SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM ref)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 53_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -328,7 +371,10 @@ DEBUG: Plan is router executable
(4 rows)

SELECT * FROM ((SELECT * FROM test) INTERSECT (SELECT * FROM test)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 57_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 57_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -343,7 +389,10 @@ DEBUG: Plan is router executable
(2 rows)

SELECT * FROM ((SELECT x, y FROM test) INTERSECT (SELECT y, x FROM test)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 61_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 61_2 for subquery SELECT y, x FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -358,6 +407,8 @@ DEBUG: Plan is router executable
(2 rows)

SELECT * FROM ((SELECT * FROM test) INTERSECT (SELECT * FROM ref)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 65_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -380,7 +431,10 @@ DEBUG: Plan is router executable
(2 rows)

SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM test)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 69_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 69_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -393,7 +447,10 @@ DEBUG: Plan is router executable
(0 rows)

SELECT * FROM ((SELECT x, y FROM test) EXCEPT (SELECT y, x FROM test)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 73_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 73_2 for subquery SELECT y, x FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -406,6 +463,8 @@ DEBUG: Plan is router executable
(0 rows)

SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM ref)) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 77_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -427,6 +486,7 @@ DEBUG: Plan is router executable

-- unions can even be pushed down within a join
SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u JOIN test USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
x | y | y
---+---+---
1 | 1 | 1
@@ -434,6 +494,7 @@ SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u JOIN test USIN
(2 rows)

SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u LEFT JOIN test USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
x | y | y
---+---+---
1 | 1 | 1
@@ -444,13 +505,17 @@ SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u LEFT JOIN

-- unions cannot be pushed down if one leaf recurs
SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test ORDER BY x LIMIT 1)) u JOIN test USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 1
DEBUG: generating subplan 83_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 83_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 83_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
DEBUG: Plan 83 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
DEBUG: Router planner cannot handle multi-shard select queries
x | y | y
---+---+---
1 | 1 | 1
@@ -458,23 +523,31 @@ DEBUG: Plan 83 query after replacing subqueries and CTEs: SELECT u.x, u.y, test
(2 rows)

SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test ORDER BY x LIMIT 1)) u LEFT JOIN test USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 1
DEBUG: generating subplan 87_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 87_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 87_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('87_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('87_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
DEBUG: Plan 87 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('87_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u LEFT JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot pushdown the subquery
DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join
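(Editor's sketch: the failure above is positional — the recursively planned union lands on the outer side of the LEFT JOIN. Consistent with the passing inner-join example earlier in this file, a hypothetical rewrite that keeps the intermediate result off the outer side of an outer join plans fine; this is illustrative, not an output line from the test.)

-- hypothetical rewrite sketch: use an inner join so the materialized union
-- is not on the outer side of an outer join
SELECT *
FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test ORDER BY x LIMIT 1)) u
JOIN test USING (x)
ORDER BY 1,2;
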
-- unions in a join without partition column equality (column names from first query are used for join)
SELECT * FROM ((SELECT x, y FROM test) UNION (SELECT y, x FROM test)) u JOIN test USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 91_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 91_2 for subquery SELECT y, x FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 91_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('91_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('91_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer)
DEBUG: Plan 91 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('91_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
DEBUG: Router planner cannot handle multi-shard select queries
x | y | y
---+---+---
1 | 1 | 1
@@ -482,12 +555,16 @@ DEBUG: Plan 91 query after replacing subqueries and CTEs: SELECT u.x, u.y, test
(2 rows)

SELECT * FROM ((SELECT x, y FROM test) UNION (SELECT 1, 1 FROM test)) u JOIN test USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 95_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 95_2 for subquery SELECT 1, 1 FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 95_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('95_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('95_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
DEBUG: Plan 95 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('95_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
DEBUG: Router planner cannot handle multi-shard select queries
x | y | y
---+---+---
1 | 1 | 1
@@ -496,6 +573,7 @@ DEBUG: Plan 95 query after replacing subqueries and CTEs: SELECT u.x, u.y, test

-- a join between a set operation and a generate_series which is pushdownable
SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test ORDER BY x)) u JOIN generate_series(1,10) x USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -504,7 +582,10 @@ DEBUG: Plan 95 query after replacing subqueries and CTEs: SELECT u.x, u.y, test

-- a join between a set operation and a generate_series which is not pushdownable due to EXCEPT
SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM test ORDER BY x)) u JOIN generate_series(1,10) x USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 100_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 100_2 for subquery SELECT x, y FROM recursive_union.test ORDER BY x
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -518,13 +599,17 @@ DEBUG: Plan is router executable

-- subqueries in WHERE clause with set operations fail due to the current limitations of recursive planning in the WHERE clause
SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) foo WHERE x IN (SELECT y FROM test);
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot pushdown the subquery since not all subqueries in the UNION have the partition column in the same position
DETAIL: Each leaf query of the UNION should return the partition column in the same position and all joins must be on the partition column
-- subqueries in WHERE clause forced to be recursively planned
SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) foo WHERE x IN (SELECT y FROM test ORDER BY 1 LIMIT 4) ORDER BY 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 4
DEBUG: generating subplan 105_1 for subquery SELECT y FROM recursive_union.test ORDER BY y LIMIT 4
DEBUG: Plan 105 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT test.x, test.y FROM recursive_union.test UNION SELECT test.x, test.y FROM recursive_union.test) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('105_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -533,9 +618,13 @@ DEBUG: Plan 105 query after replacing subqueries and CTEs: SELECT x, y FROM (SE

-- now both the set operations and the sublink are recursively planned
SELECT * FROM ((SELECT x,y FROM test) UNION (SELECT y,x FROM test)) foo WHERE x IN (SELECT y FROM test ORDER BY 1 LIMIT 4) ORDER BY 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 4
DEBUG: generating subplan 107_1 for subquery SELECT y FROM recursive_union.test ORDER BY y LIMIT 4
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 107_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 107_3 for subquery SELECT y, x FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -551,11 +640,15 @@ DEBUG: Plan is router executable

-- set operations and the sublink can be recursively planned
SELECT * FROM ((SELECT x,y FROM test) UNION (SELECT y,x FROM test)) foo WHERE x IN (SELECT y FROM test) ORDER BY 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 112_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 112_2 for subquery SELECT y, x FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 112_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('112_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('112_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 112_4 for subquery SELECT y FROM recursive_union.test
DEBUG: Plan 112 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('112_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('112_4'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x
DEBUG: Creating router plan
@@ -571,7 +664,10 @@ SELECT x, y, rnk FROM (SELECT *, rank() OVER my_win as rnk FROM test WINDOW my_w
UNION
SELECT x, y, rnk FROM (SELECT *, rank() OVER my_win as rnk FROM test WINDOW my_win AS (PARTITION BY x ORDER BY y DESC)) as bar
ORDER BY 1 DESC, 2 DESC, 3 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 117_1 for subquery SELECT x, y, rnk FROM (SELECT test.x, test.y, rank() OVER my_win AS rnk FROM recursive_union.test WINDOW my_win AS (PARTITION BY test.x ORDER BY test.y DESC)) foo
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 117_2 for subquery SELECT x, y, rnk FROM (SELECT test.x, test.y, rank() OVER my_win AS rnk FROM recursive_union.test WINDOW my_win AS (PARTITION BY test.x ORDER BY test.y DESC)) bar
DEBUG: Plan 117 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y, intermediate_result.rnk FROM read_intermediate_result('117_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer, rnk bigint) UNION SELECT intermediate_result.x, intermediate_result.y, intermediate_result.rnk FROM read_intermediate_result('117_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer, rnk bigint) ORDER BY 1 DESC, 2 DESC, 3 DESC
DEBUG: Creating router plan
@@ -586,38 +682,51 @@ DEBUG: Plan is router executable
SELECT x, y, rnk FROM (SELECT *, rank() OVER my_win as rnk FROM test WINDOW my_win AS (PARTITION BY y ORDER BY x DESC)) as foo
UNION
SELECT x, y, rnk FROM (SELECT *, rank() OVER my_win as rnk FROM test WINDOW my_win AS (PARTITION BY y ORDER BY x DESC)) as bar;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: could not run distributed query because the window function that is used cannot be pushed down
HINT: Window functions are supported in two ways. Either add an equality filter on the distributed tables' partition column or use the window functions with a PARTITION BY clause containing the distribution column
-- other set operations in joins also cannot be pushed down
SELECT * FROM ((SELECT * FROM test) EXCEPT (SELECT * FROM test ORDER BY x LIMIT 1)) u JOIN test USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 1
DEBUG: generating subplan 122_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 122_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 122_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
DEBUG: Plan 122 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
DEBUG: Router planner cannot handle multi-shard select queries
x | y | y
---+---+---
2 | 2 | 2
(1 row)

SELECT * FROM ((SELECT * FROM test) INTERSECT (SELECT * FROM test ORDER BY x LIMIT 1)) u LEFT JOIN test USING (x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 1
DEBUG: generating subplan 126_1 for subquery SELECT x, y FROM recursive_union.test ORDER BY x LIMIT 1
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 126_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 126_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('126_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('126_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
DEBUG: Plan 126 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('126_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u LEFT JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot pushdown the subquery
DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join
-- distributed table in WHERE clause is recursively planned
SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM ref WHERE a IN (SELECT x FROM test))) u ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 130_1 for subquery SELECT x FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 130_2 for subquery SELECT a, b FROM recursive_union.ref WHERE (a OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('130_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)))
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 130_3 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -633,6 +742,7 @@ DEBUG: Plan is router executable

-- subquery union in WHERE clause with partition column equality and implicit join is pushed down
SELECT * FROM test a WHERE x IN (SELECT x FROM test b WHERE y = 1 UNION SELECT x FROM test c WHERE y = 2) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -641,25 +751,34 @@ SELECT * FROM test a WHERE x IN (SELECT x FROM test b WHERE y = 1 UNION SELECT x

-- subquery union in WHERE clause with partition column equality, without implicit join on partition column is recursively planned
SELECT * FROM test a WHERE x NOT IN (SELECT x FROM test b WHERE y = 1 UNION SELECT x FROM test c WHERE y = 2) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 137_1 for subquery SELECT x FROM recursive_union.test b WHERE (y OPERATOR(pg_catalog.=) 1)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 137_2 for subquery SELECT x FROM recursive_union.test c WHERE (y OPERATOR(pg_catalog.=) 2)
DEBUG: Plan 137 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('137_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.x FROM read_intermediate_result('137_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer)
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 136_1 for subquery SELECT b.x FROM recursive_union.test b WHERE (b.y OPERATOR(pg_catalog.=) 1) UNION SELECT c.x FROM recursive_union.test c WHERE (c.y OPERATOR(pg_catalog.=) 2)
DEBUG: Plan 136 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (NOT (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('136_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)))) ORDER BY x, y
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
(0 rows)

-- subquery union in WHERE clause without partition column equality is recursively planned
SELECT * FROM test a WHERE x IN (SELECT x FROM test b UNION SELECT y FROM test c) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 140_1 for subquery SELECT x FROM recursive_union.test b
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 140_2 for subquery SELECT y FROM recursive_union.test c
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 140_3 for subquery SELECT intermediate_result.x FROM read_intermediate_result('140_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('140_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer)
DEBUG: Plan 140 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('140_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -668,17 +787,24 @@ DEBUG: Plan 140 query after replacing subqueries and CTEs: SELECT x, y FROM rec

-- correlated subquery with union in WHERE clause
SELECT * FROM test a WHERE x IN (SELECT x FROM test b UNION SELECT y FROM test c WHERE a.x = c.x) ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 144_1 for subquery SELECT x FROM recursive_union.test b
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: Plan 144 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('144_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT c.y FROM recursive_union.test c WHERE (a.x OPERATOR(pg_catalog.=) c.x))) ORDER BY x, y
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: cannot push down this subquery
DETAIL: Complex subqueries and CTEs are not supported within a UNION
-- force unions to be planned while subqueries are being planned
SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test) ORDER BY 1,2 LIMIT 5) as foo ORDER BY 1 DESC LIMIT 3;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 147_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 147_2 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Plan 147 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('147_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('147_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 LIMIT 5
DEBUG: Creating router plan
@@ -695,7 +821,10 @@ DEBUG: Plan is router executable

-- distinct and count distinct should work without any problems
select count(DISTINCT t.x) FROM ((SELECT DISTINCT x FROM test) UNION (SELECT DISTINCT y FROM test)) as t(x) ORDER BY 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 150_1 for subquery SELECT DISTINCT y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 150_2 for subquery SELECT DISTINCT x FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -709,7 +838,10 @@ DEBUG: Plan is router executable
(1 row)

select count(DISTINCT t.x) FROM ((SELECT count(DISTINCT x) FROM test) UNION (SELECT count(DISTINCT y) FROM test)) as t(x) ORDER BY 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 154_1 for subquery SELECT count(DISTINCT x) AS count FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 154_2 for subquery SELECT count(DISTINCT y) AS count FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -724,7 +856,10 @@ DEBUG: Plan is router executable

-- other agg. distincts are also supported when group by includes partition key
select avg(DISTINCT t.x) FROM ((SELECT avg(DISTINCT y) FROM test GROUP BY x) UNION (SELECT avg(DISTINCT y) FROM test GROUP BY x)) as t(x) ORDER BY 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 158_1 for subquery SELECT avg(DISTINCT y) AS avg FROM recursive_union.test GROUP BY x
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 158_2 for subquery SELECT avg(DISTINCT y) AS avg FROM recursive_union.test GROUP BY x
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -739,12 +874,16 @@ DEBUG: Plan is router executable

-- other agg. distincts are not supported when group by doesn't include partition key
select count(DISTINCT t.x) FROM ((SELECT avg(DISTINCT y) FROM test GROUP BY y) UNION (SELECT avg(DISTINCT y) FROM test GROUP BY y)) as t(x) ORDER BY 1;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot compute aggregate (distinct)
DETAIL: table partitioning is unsuitable for aggregate (distinct)
-- one of the leaves is a repartition join
SET citus.enable_repartition_joins TO ON;
-- repartition is recursively planned before the set operation
(SELECT x FROM test) INTERSECT (SELECT t1.x FROM test as t1, test as t2 WHERE t1.x = t2.y LIMIT 0) ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 0
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
@@ -777,6 +916,7 @@ DETAIL: Creating dependency on merge taskId 20
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
DEBUG: generating subplan 164_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 0
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 164_2 for subquery SELECT x FROM recursive_union.test
DEBUG: Plan 164 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('164_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('164_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC
DEBUG: Creating router plan
@@ -787,6 +927,8 @@ DEBUG: Plan is router executable

-- repartition is recursively planned with the set operation
(SELECT x FROM test) INTERSECT (SELECT t1.x FROM test as t1, test as t2 WHERE t1.x = t2.y) ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@@ -818,6 +960,7 @@ DETAIL: Creating dependency on merge taskId 20
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
DEBUG: generating subplan 167_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 167_2 for subquery SELECT x FROM recursive_union.test
DEBUG: Plan 167 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('167_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('167_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC
DEBUG: Creating router plan
@@ -832,7 +975,10 @@ SET citus.enable_repartition_joins TO OFF;
-- this should be recursively planned
CREATE VIEW set_view_recursive AS (SELECT y FROM test) UNION (SELECT y FROM test);
SELECT * FROM set_view_recursive ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 170_1 for subquery SELECT y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 170_2 for subquery SELECT y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
@@ -849,6 +995,7 @@ DEBUG: Plan is router executable
-- this should be pushed down
CREATE VIEW set_view_pushdown AS (SELECT x FROM test) UNION (SELECT x FROM test);
SELECT * FROM set_view_pushdown ORDER BY 1 DESC;
DEBUG: Router planner cannot handle multi-shard select queries
x
---
2
@@ -858,12 +1005,16 @@ SELECT * FROM set_view_pushdown ORDER BY 1 DESC;
-- this should be recursively planned
CREATE VIEW set_view_recursive_second AS SELECT u.x, test.y FROM ((SELECT x, y FROM test) UNION (SELECT 1, 1 FROM test)) u JOIN test USING (x) ORDER BY 1,2;
SELECT * FROM set_view_recursive_second ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 175_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 175_2 for subquery SELECT 1, 1 FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 175_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('175_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('175_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
DEBUG: Plan 175 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT u.x, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('175_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, test.y) set_view_recursive_second ORDER BY x, y
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---+---
1 | 1
@@ -872,17 +1023,24 @@ DEBUG: Plan 175 query after replacing subqueries and CTEs: SELECT x, y FROM (SE

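(Editor's sketch: the contrast between the two views follows the rule the earlier ERROR detail states — every UNION leaf must return the distribution column in the same position. set_view_pushdown unions x (the distribution column) in each leg and pushes down; set_view_recursive unions y and is recursively planned. A hypothetical way to see this for any view, using only settings this file already uses:)

-- hypothetical check sketch: with DEBUG messages on, a pushdownable view
-- produces no "generating subplan" lines, a recursively planned one does
SET client_min_messages TO DEBUG;
SELECT * FROM set_view_pushdown ORDER BY 1;   -- expected: no subplan messages
SELECT * FROM set_view_recursive ORDER BY 1;  -- expected: one subplan per UNION leg
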
-- this should create lots of recursive calls since both views and set operations lead to recursive plans :)
((SELECT x FROM set_view_recursive_second) INTERSECT (SELECT * FROM set_view_recursive)) EXCEPT (SELECT * FROM set_view_pushdown);
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 179_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 179_2 for subquery SELECT 1, 1 FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 179_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('179_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('179_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 179_4 for subquery SELECT y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 179_5 for subquery SELECT y FROM recursive_union.test
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 179_6 for subquery SELECT intermediate_result.y FROM read_intermediate_result('179_4'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('179_5'::text, 'binary'::citus_copy_format) intermediate_result(y integer)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 179_7 for subquery SELECT x FROM (SELECT u.x, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('179_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, test.y) set_view_recursive_second
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 179_8 for subquery SELECT x FROM (SELECT test.x FROM recursive_union.test UNION SELECT test.x FROM recursive_union.test) set_view_pushdown
DEBUG: Plan 179 query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('179_7'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT set_view_recursive.y FROM (SELECT intermediate_result.y FROM read_intermediate_result('179_6'::text, 'binary'::citus_copy_format) intermediate_result(y integer)) set_view_recursive) EXCEPT SELECT intermediate_result.x FROM read_intermediate_result('179_8'::text, 'binary'::citus_copy_format) intermediate_result(x integer)
DEBUG: Creating router plan

@@ -35,6 +35,7 @@ FROM
single_hash_repartition_first t1, single_hash_repartition_second t2
WHERE
t1.id = t2.sum;
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_first" ][ single hash partition join "single_hash_repartition_second" ]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
@@ -65,6 +66,7 @@ FROM
single_hash_repartition_second t1, single_hash_repartition_first t2
WHERE
t2.sum = t1.id;
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_second" ][ single hash partition join "single_hash_repartition_first" ]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
@@ -95,6 +97,7 @@ FROM
ref_table r1, single_hash_repartition_second t1, single_hash_repartition_first t2
WHERE
r1.id = t1.id AND t2.sum = t1.id;
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_second" ][ reference join "ref_table" ][ single hash partition join "single_hash_repartition_first" ]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
@@ -125,6 +128,7 @@ FROM
single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3
WHERE
t1.id = t2.id AND t1.sum = t3.id;
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_first" ][ local partition join "single_hash_repartition_first" ][ single hash partition join "single_hash_repartition_second" ]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
@@ -167,6 +171,7 @@ FROM
single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3
WHERE
t1.sum = t2.sum AND t1.sum = t3.id;
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_first" ][ single hash partition join "single_hash_repartition_second" ][ dual partition join "single_hash_repartition_first" ]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
@@ -225,6 +230,7 @@ FROM
single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3
WHERE
t1.id = t2.id AND t1.avg = t3.id;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: single partition column types do not match
DEBUG: single partition column types do not match
DEBUG: dual partition column types do not match
@@ -260,7 +266,9 @@ FROM
cte1, single_hash_repartition_first
WHERE
cte1.data > single_hash_repartition_first.id;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan 7_1 for CTE cte1: SELECT ((t1.id)::double precision OPERATOR(pg_catalog.*) t2.avg) AS data FROM single_hash_repartition.single_hash_repartition_first t1, single_hash_repartition.single_hash_repartition_second t2 WHERE ((t1.id OPERATOR(pg_catalog.=) t2.sum) AND (t1.sum OPERATOR(pg_catalog.>) 5000)) ORDER BY ((t1.id)::double precision OPERATOR(pg_catalog.*) t2.avg) DESC LIMIT 50
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_first" ][ single hash partition join "single_hash_repartition_second" ]
DEBUG: push down of limit count: 50
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
@@ -292,6 +300,7 @@ FROM
single_hash_repartition_first t1, single_hash_repartition_second t2, single_hash_repartition_second t3
WHERE
t1.id = t2.sum AND t2.sum = t3.id;
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_first" ][ single hash partition join "single_hash_repartition_second" ][ single hash partition join "single_hash_repartition_second" ]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
@@ -344,6 +353,7 @@ FROM
WHERE
t1.id = t2.sum AND t2.id = t3.sum
LIMIT 10;
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_second" ][ single hash partition join "single_hash_repartition_second" ][ single hash partition join "single_hash_repartition_first" ]
DEBUG: push down of limit count: 10
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
@@ -400,6 +410,7 @@ FROM
single_hash_repartition_first t1, single_hash_repartition_second t2
WHERE
t1.id = t2.sum;
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_first" ][ single hash partition join "single_hash_repartition_second" ]
DEBUG: join prunable for intervals [-2147483648,-1073741826] and [-1073741824,-2]
DEBUG: join prunable for intervals [-2147483648,-1073741826] and [0,1073741822]
@@ -432,6 +443,7 @@ FROM
single_hash_repartition_first t1, single_hash_repartition_second t2
WHERE
t1.sum = t2.id;
DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "single_hash_repartition_first" ][ single hash partition join "single_hash_repartition_second" ]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
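The repeated `join prunable for intervals` lines in these hunks reflect shard pruning during single-hash repartition joins: a shard pair is skipped whenever the two hash-token ranges cannot overlap. A rough illustration of that overlap test in plain SQL, with the interval values taken from the DEBUG output above (assumed semantics for illustration, not the actual Citus internals):

```sql
-- two hash-token ranges are join-prunable when they do not overlap
SELECT NOT (s1.min_value <= s2.max_value
        AND s2.min_value <= s1.max_value) AS prunable
FROM (VALUES (-2147483648, -1073741825)) AS s1(min_value, max_value),
     (VALUES (0, 1073741823))            AS s2(min_value, max_value);
-- returns true, so no repartition task is created for this shard pair
```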
@@ -3,6 +3,17 @@
-- ===================================================================
SET search_path TO subquery_and_ctes;
CREATE TABLE users_table_local AS SELECT * FROM users_table;
CREATE TABLE dist_table (id int, value int);
SELECT create_distributed_table('dist_table', 'id', colocate_with => 'users_table');
create_distributed_table
--------------------------

(1 row)

INSERT INTO dist_table (id, value) VALUES(1, 2),(2, 3),(3,4);
CREATE FUNCTION func() RETURNS TABLE (id int, value int) AS $$
SELECT 1, 2
$$ LANGUAGE SQL;
SET client_min_messages TO DEBUG1;
-- CTEs are recursively planned, and subquery foo is also recursively planned
-- final plan becomes a router plan
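The `colocate_with => 'users_table'` argument in the setup above is what makes the single-shard routing tested later possible: colocated tables share shard placements, so a query touching both can run as one task. One way to check colocation, using only the standard Citus metadata table `pg_dist_partition`:

```sql
-- tables with the same colocationid share shard placements
SELECT logicalrelid, colocationid
FROM pg_dist_partition
WHERE logicalrelid IN ('dist_table'::regclass, 'users_table'::regclass);
```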
@@ -13,7 +24,7 @@ WITH cte AS (
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT
count(*)
@@ -29,18 +40,90 @@ FROM
ORDER BY 1 DESC LIMIT 5
) as foo
WHERE foo.user_id = cte.user_id;
DEBUG: generating subplan 2_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 3_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 3_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 3_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 4_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 4_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('4_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: push down of limit count: 5
DEBUG: generating subplan 2_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('2_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id)
DEBUG: generating subplan 3_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id)
count
-------
1644
(1 row)

-- CTEs are colocated, route entire query
WITH cte1 AS (
SELECT * FROM users_table WHERE user_id = 1
), cte2 AS (
SELECT * FROM events_table WHERE user_id = 1
)
SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type
FROM cte1, cte2
ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type
LIMIT 5;
user_id | value_1 | user_id | event_type
---------+---------+---------+------------
1 | 1 | 1 | 0
1 | 1 | 1 | 0
1 | 1 | 1 | 1
1 | 1 | 1 | 1
1 | 1 | 1 | 2
(5 rows)

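Note what is absent here: no `generating subplan` DEBUG lines. Because both CTEs filter colocated tables on the same distribution-column value (`user_id = 1`), the router planner ships the whole query, CTEs included, to a single shard placement. Schematically, the router-plannable shape is (a sketch, assuming colocated hash-distributed tables):

```sql
-- all relations colocated and pinned to one shard key value,
-- so no CTE needs to be materialized as an intermediate result
WITH a AS (SELECT * FROM users_table  WHERE user_id = 1),
     b AS (SELECT * FROM events_table WHERE user_id = 1)
SELECT a.user_id, b.event_type
FROM a, b;
```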
-- CTEs aren't colocated, CTEs become intermediate results
WITH cte1 AS (
SELECT * FROM users_table WHERE user_id = 1
), cte2 AS (
SELECT * FROM events_table WHERE user_id = 6
)
SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.user_id
FROM cte1, cte2
ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type
LIMIT 5;
DEBUG: generating subplan 8_1 for CTE cte1: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1)
DEBUG: generating subplan 8_2 for CTE cte2: SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM subquery_and_ctes.events_table WHERE (user_id OPERATOR(pg_catalog.=) 6)
DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.user_id FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte1, (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) cte2 ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type LIMIT 5
user_id | value_1 | user_id | user_id
---------+---------+---------+---------
1 | 1 | 6 | 6
1 | 1 | 6 | 6
1 | 1 | 6 | 6
1 | 1 | 6 | 6
1 | 1 | 6 | 6
(5 rows)

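Here `cte2` filters on `user_id = 6`, a different shard key value, so the query is no longer single-shard: each CTE is cut out, executed on its own, and written to an intermediate result that the outer query reads back. The rewritten form is visible in the `Plan 8` line above; conceptually each CTE reference is replaced by a scan of this shape (taken directly from that DEBUG output):

```sql
-- the CTE body is gone, replaced by a scan over its broadcast result file
SELECT intermediate_result.user_id
FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format)
         AS intermediate_result(user_id integer);
```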
-- users_table & dist_table are colocated, route entire query
|
||||
WITH cte1 AS (
|
||||
SELECT * FROM users_table WHERE user_id = 1
|
||||
)
|
||||
UPDATE dist_table dt SET value = cte1.value_1
|
||||
FROM cte1 WHERE cte1.user_id = dt.id AND dt.id = 1;
|
||||
-- users_table & events_table & dist_table are colocated, route entire query
|
||||
WITH cte1 AS (
|
||||
SELECT * FROM users_table WHERE user_id = 1
|
||||
), cte2 AS (
|
||||
SELECT * FROM events_table WHERE user_id = 1
|
||||
)
|
||||
UPDATE dist_table dt SET value = cte1.value_1 + cte2.event_type
|
||||
FROM cte1, cte2 WHERE cte1.user_id = dt.id AND dt.id = 1;
|
||||
-- all relations are not colocated, CTEs become intermediate results
|
||||
WITH cte1 AS (
|
||||
SELECT * FROM users_table WHERE user_id = 1
|
||||
), cte2 AS (
|
||||
SELECT * FROM events_table WHERE user_id = 6
|
||||
)
|
||||
UPDATE dist_table dt SET value = cte1.value_1 + cte2.event_type
|
||||
FROM cte1, cte2 WHERE cte1.user_id = dt.id AND dt.id = 1;
|
||||
DEBUG: generating subplan 13_1 for CTE cte1: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1)
|
||||
DEBUG: generating subplan 13_2 for CTE cte2: SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM subquery_and_ctes.events_table WHERE (user_id OPERATOR(pg_catalog.=) 6)
|
||||
DEBUG: Plan 13 query after replacing subqueries and CTEs: UPDATE subquery_and_ctes.dist_table dt SET value = (cte1.value_1 OPERATOR(pg_catalog.+) cte2.event_type) FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte1, (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) cte2 WHERE ((cte1.user_id OPERATOR(pg_catalog.=) dt.id) AND (dt.id OPERATOR(pg_catalog.=) 1))
|
||||
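The first two UPDATE statements above exercise the behaviour this PR adds: they emit no subplan DEBUG lines at all, because the router planner now accepts CTEs in modification queries and ships the whole statement to one shard when every relation is colocated and filtered to the same shard key value. Only the third UPDATE, whose `cte2` targets `user_id = 6`, falls back to materializing its CTEs (the `Plan 13` lines). Schematically, the newly router-plannable form is (a sketch, with hypothetical table names):

```sql
-- colocated relations, one shard key value, CTE included in the
-- single routed statement rather than materialized separately
WITH src AS (
    SELECT value_1 FROM users_table WHERE user_id = 1
)
UPDATE dist_table dt
SET value = src.value_1
FROM src
WHERE dt.id = 1;
```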
-- volatile function calls should not be routed
WITH cte1 AS (SELECT id, value FROM func())
UPDATE dist_table dt SET value = cte1.value
FROM cte1 WHERE dt.id = 1;
DEBUG: generating subplan 16_1 for CTE cte1: SELECT id, value FROM subquery_and_ctes.func() func(id, value)
DEBUG: Plan 16 query after replacing subqueries and CTEs: UPDATE subquery_and_ctes.dist_table dt SET value = cte1.value FROM (SELECT intermediate_result.id, intermediate_result.value FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value integer)) cte1 WHERE (dt.id OPERATOR(pg_catalog.=) 1)
-- CTEs are recursively planned, and subquery foo is also recursively planned
-- final plan becomes a real-time plan since we also have events_table in the
-- range table entries
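A volatile function must be evaluated exactly once rather than re-run on each shard, which is why `cte1` above is materialized (subplan `16_1`) even though the UPDATE itself is routable. The assumption, for illustration, is that the function's volatility label drives this decision; SQL functions default to VOLATILE unless declared otherwise, which is easy to confirm from the catalog:

```sql
-- 'v' = volatile, 's' = stable, 'i' = immutable
SELECT provolatile FROM pg_proc WHERE proname = 'func';
```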
@@ -51,7 +134,7 @@ WITH cte AS (
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT
count(*)
@@ -67,13 +150,13 @@ FROM
ORDER BY 1 DESC LIMIT 5
) as foo, events_table
WHERE foo.user_id = cte.user_id AND events_table.user_id = cte.user_id;
DEBUG: generating subplan 6_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 7_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 7_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('7_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 17_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 18_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 18_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('18_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: push down of limit count: 5
DEBUG: generating subplan 6_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('6_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE ((foo.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) cte.user_id))
DEBUG: generating subplan 17_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE ((foo.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) cte.user_id))
count
-------
30608
@@ -89,7 +172,7 @@ WITH cte AS (
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT DISTINCT cte.user_id
FROM users_table, cte
@@ -97,13 +180,13 @@ WHERE
users_table.user_id = cte.user_id AND
users_table.user_id IN (SELECT DISTINCT value_2 FROM users_table WHERE value_1 >= 1 AND value_1 <= 20 ORDER BY 1 LIMIT 5)
ORDER BY 1 DESC;
DEBUG: generating subplan 10_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 11_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 11_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 21_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 22_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 22_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('22_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: push down of limit count: 5
DEBUG: generating subplan 10_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5
DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) ORDER BY cte.user_id DESC
DEBUG: generating subplan 21_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5
DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) ORDER BY cte.user_id DESC
user_id
---------
4
@@ -121,19 +204,19 @@ WITH cte AS (
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT DISTINCT cte.user_id
FROM cte
WHERE
cte.user_id IN (SELECT DISTINCT user_id FROM users_table WHERE value_1 >= 1 AND value_1 <= 20)
ORDER BY 1 DESC;
DEBUG: generating subplan 14_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 15_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 15_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('15_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('15_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 14_2 for subquery SELECT DISTINCT user_id FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20))
DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY user_id DESC
DEBUG: generating subplan 25_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 26_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 26_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 25_2 for subquery SELECT DISTINCT user_id FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20))
DEBUG: Plan 25 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('25_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('25_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY user_id DESC
user_id
---------
6
@@ -161,8 +244,8 @@ FROM
) SELECT * FROM cte ORDER BY 1 DESC
) as foo
ORDER BY 1 DESC;
DEBUG: generating subplan 18_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))
DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo ORDER BY user_id DESC
DEBUG: generating subplan 29_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))
DEBUG: Plan 29 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('29_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo ORDER BY user_id DESC
user_id
---------
6
@@ -201,8 +284,8 @@ FROM
) as bar
WHERE foo.user_id = bar.user_id
ORDER BY 1 DESC;
DEBUG: generating subplan 20_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))
DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC
DEBUG: generating subplan 31_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))
DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC
user_id
---------
6
@@ -254,11 +337,11 @@ FROM
) as bar
WHERE foo.user_id = bar.user_id
ORDER BY 1 DESC LIMIT 5;
DEBUG: generating subplan 22_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))
DEBUG: generating subplan 22_2 for CTE cte: SELECT events_table.event_type, users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.value_1 OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2])))
DEBUG: generating subplan 33_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))
DEBUG: generating subplan 33_2 for CTE cte: SELECT events_table.event_type, users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table.value_1 OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2])))
DEBUG: push down of limit count: 2
DEBUG: generating subplan 22_3 for subquery SELECT users_table.user_id, some_events.event_type FROM subquery_and_ctes.users_table, (SELECT cte.event_type, cte.user_id FROM (SELECT intermediate_result.event_type, intermediate_result.user_id FROM read_intermediate_result('22_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer, user_id integer)) cte ORDER BY cte.event_type DESC) some_events WHERE ((users_table.user_id OPERATOR(pg_catalog.=) some_events.user_id) AND (some_events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY some_events.event_type, users_table.user_id LIMIT 2
DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT DISTINCT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('22_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC LIMIT 5
DEBUG: generating subplan 33_3 for subquery SELECT users_table.user_id, some_events.event_type FROM subquery_and_ctes.users_table, (SELECT cte.event_type, cte.user_id FROM (SELECT intermediate_result.event_type, intermediate_result.user_id FROM read_intermediate_result('33_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer, user_id integer)) cte ORDER BY cte.event_type DESC) some_events WHERE ((users_table.user_id OPERATOR(pg_catalog.=) some_events.user_id) AND (some_events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY some_events.event_type, users_table.user_id LIMIT 2
DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT DISTINCT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('33_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC LIMIT 5
user_id
---------
1
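A detail worth noticing in the `Plan 33` trail above: two subplans are generated "for CTE cte", because the query nests two CTEs with the same name at different levels, and each one is planned and materialized independently. The scoping behaviour itself is plain SQL:

```sql
-- same-named CTEs at different nesting depths are distinct objects,
-- so each becomes its own subplan / intermediate result
WITH cte AS (SELECT 1 AS val)
SELECT *
FROM (
    WITH cte AS (SELECT 2 AS val)
    SELECT val FROM cte      -- the inner cte shadows the outer one here
) AS inner_q, cte;           -- the outer cte is visible again at this level
```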
@@ -276,7 +359,7 @@ SELECT * FROM
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT DISTINCT cte.user_id
FROM users_table, cte
@@ -291,14 +374,14 @@ SELECT * FROM
foo.user_id = events_table.value_2
ORDER BY 3 DESC, 2 DESC, 1 DESC
LIMIT 5;
DEBUG: generating subplan 26_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 27_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 27_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 26_2 for CTE cte_in_where: SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5
DEBUG: generating subplan 37_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 38_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 38_2 for CTE dist_cte: SELECT user_id FROM subquery_and_ctes.events_table
DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 37_2 for CTE cte_in_where: SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5
DEBUG: push down of limit count: 5
DEBUG: generating subplan 26_3 for subquery SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT cte_in_where.value_2 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) cte_in_where))) ORDER BY cte.user_id DESC
DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT foo.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('26_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) ORDER BY events_table."time" DESC, events_table.user_id DESC, foo.user_id DESC LIMIT 5
DEBUG: generating subplan 37_3 for subquery SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('37_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT cte_in_where.value_2 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('37_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) cte_in_where))) ORDER BY cte.user_id DESC
DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT foo.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('37_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) ORDER BY events_table."time" DESC, events_table.user_id DESC, foo.user_id DESC LIMIT 5
DEBUG: push down of limit count: 5
user_id | user_id | time | event_type | value_2 | value_3 | value_4
---------+---------+---------------------------------+------------+---------+---------+---------
@@ -324,7 +407,7 @@ WITH cte AS (
events_table.user_id = foo.value_2 AND
events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3)
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT
count(*)
@@ -340,17 +423,17 @@ FROM
ORDER BY 1 DESC LIMIT 5
) as foo
WHERE foo.user_id = cte.user_id;
DEBUG: generating subplan 31_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 32_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 32_2 for CTE dist_cte: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))
DEBUG: generating subplan 42_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 43_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 43_2 for CTE dist_cte: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))
DEBUG: push down of limit count: 3
DEBUG: generating subplan 33_1 for subquery SELECT DISTINCT value_1 FROM subquery_and_ctes.users_table ORDER BY value_1 LIMIT 3
DEBUG: generating subplan 33_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table OFFSET 0
DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('33_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer))))
DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('32_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 44_1 for subquery SELECT DISTINCT value_1 FROM subquery_and_ctes.users_table ORDER BY value_1 LIMIT 3
DEBUG: generating subplan 44_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table OFFSET 0
DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('44_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('44_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer))))
DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('43_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('43_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: push down of limit count: 5
DEBUG: generating subplan 31_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('31_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id)
DEBUG: generating subplan 42_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id)
count
-------
432
@@ -375,7 +458,7 @@ FROM
events_table.user_id = foo.value_2 AND
events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3)
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT
count(*) as cnt
@ -394,18 +477,18 @@ FROM
|
|||
) as foo, users_table WHERE foo.cnt > users_table.value_2
|
||||
ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC
|
||||
LIMIT 5;
|
||||
DEBUG: generating subplan 37_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 38_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 38_2 for CTE dist_cte: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))
DEBUG: generating subplan 48_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_and_ctes.users_table_local), dist_cte AS (SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 49_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table_local
DEBUG: generating subplan 49_2 for CTE dist_cte: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT DISTINCT users_table.value_2 FROM subquery_and_ctes.users_table OFFSET 0) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT users_table.value_1 FROM subquery_and_ctes.users_table ORDER BY users_table.value_1 LIMIT 3)))
DEBUG: push down of limit count: 3
DEBUG: generating subplan 39_1 for subquery SELECT DISTINCT value_1 FROM subquery_and_ctes.users_table ORDER BY value_1 LIMIT 3
DEBUG: generating subplan 39_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table OFFSET 0
DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer))))
DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: generating subplan 50_1 for subquery SELECT DISTINCT value_1 FROM subquery_and_ctes.users_table ORDER BY value_1 LIMIT 3
DEBUG: generating subplan 50_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table OFFSET 0
DEBUG: Plan 50 query after replacing subqueries and CTEs: SELECT events_table.user_id FROM subquery_and_ctes.events_table, (SELECT intermediate_result.value_2 FROM read_intermediate_result('50_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo WHERE ((events_table.user_id OPERATOR(pg_catalog.=) foo.value_2) AND (events_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('50_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer))))
DEBUG: Plan 49 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('49_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('49_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
DEBUG: push down of limit count: 5
DEBUG: generating subplan 37_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
DEBUG: generating subplan 37_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('37_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('37_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id)
DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('37_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, subquery_and_ctes.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2) ORDER BY users_table."time" DESC, foo.cnt DESC, users_table.user_id DESC, users_table.value_1 DESC LIMIT 5
DEBUG: generating subplan 48_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
DEBUG: generating subplan 48_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('48_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id)
DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('48_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, subquery_and_ctes.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2) ORDER BY users_table."time" DESC, foo.cnt DESC, users_table.user_id DESC, users_table.value_1 DESC LIMIT 5
DEBUG: push down of limit count: 5
cnt | user_id | time | value_1 | value_2 | value_3 | value_4
-----+---------+---------------------------------+---------+---------+---------+---------
@ -482,8 +565,10 @@ ERROR: (3/3) failed to execute one of the tasks
CONTEXT: PL/pgSQL function inline_code_block line 29 at RAISE
SET client_min_messages TO DEFAULT;
DROP SCHEMA subquery_and_ctes CASCADE;
NOTICE: drop cascades to 3 other objects
NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table users_table
drop cascades to table events_table
drop cascades to table users_table_local
drop cascades to table dist_table
drop cascades to function func()
SET search_path TO public;
@ -30,7 +30,6 @@ WITH ids_to_delete AS (
SELECT tenant_id FROM distributed_table WHERE dept = 1
)
DELETE FROM reference_table WHERE id IN (SELECT tenant_id FROM ids_to_delete);
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 4_1 for CTE ids_to_delete: SELECT tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 1)
DEBUG: Plan 4 query after replacing subqueries and CTEs: DELETE FROM with_dml.reference_table WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete))
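As the log shows, a CTE attached to a distributed modification is planned recursively: the CTE runs as subplan 4_1 and the DELETE is rewritten to read the materialized rows. Reusing the names and types from the log above, the two-step execution is roughly:

    -- step 1, materialized as intermediate result '4_1'
    SELECT tenant_id FROM distributed_table WHERE dept = 1;
    -- step 2, the rewritten modification
    DELETE FROM reference_table
    WHERE id IN (
        SELECT tenant_id
        FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format)
            AS intermediate_result(tenant_id text));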
-- update the name of the users whose dept is 2
@ -38,7 +37,6 @@ WITH ids_to_update AS (
SELECT tenant_id FROM distributed_table WHERE dept = 2
)
UPDATE reference_table SET name = 'new_' || name WHERE id IN (SELECT tenant_id FROM ids_to_update);
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 6_1 for CTE ids_to_update: SELECT tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 2)
DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE with_dml.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) name) WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_update.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_update))
-- now the CTE is also modifying
@ -51,7 +49,6 @@ ids_deleted_4 AS
DELETE FROM distributed_table WHERE dept = 4 RETURNING tenant_id
)
DELETE FROM reference_table WHERE id IN (SELECT * FROM ids_deleted_3 UNION SELECT * FROM ids_deleted_4);
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 8_1 for CTE ids_deleted_3: DELETE FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 3) RETURNING tenant_id
DEBUG: generating subplan 8_2 for CTE ids_deleted_4: DELETE FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 4) RETURNING tenant_id
DEBUG: generating subplan 8_3 for subquery SELECT ids_deleted_3.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_deleted_3 UNION SELECT ids_deleted_4.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_deleted_4
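Data-modifying CTEs follow the same pattern: each DELETE ... RETURNING runs as its own subplan (8_1 and 8_2 above), the UNION of their results becomes a third subplan (8_3), and only then does the outer DELETE run. A sketch of the final statement, matching the subplan names in the log:

    DELETE FROM reference_table
    WHERE id IN (
        SELECT tenant_id
        FROM read_intermediate_result('8_3'::text, 'binary'::citus_copy_format)
            AS intermediate_result(tenant_id text));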
@ -70,7 +67,6 @@ FROM
WHERE
some_tenants.tenant_id = ids_to_delete.tenant_id
AND distributed_table.tenant_id = some_tenants.tenant_id;
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 12_1 for CTE ids_to_delete: SELECT tenant_id FROM with_dml.distributed_table WHERE (dept OPERATOR(pg_catalog.=) 5)
DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE with_dml.distributed_table SET dept = (distributed_table.dept OPERATOR(pg_catalog.+) 1) FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete, (SELECT distributed_table_1.tenant_id FROM with_dml.distributed_table distributed_table_1 WHERE ((distributed_table_1.tenant_id)::integer OPERATOR(pg_catalog.<) 60)) some_tenants WHERE ((some_tenants.tenant_id OPERATOR(pg_catalog.=) ids_to_delete.tenant_id) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) some_tenants.tenant_id))
-- this query errors out since we've some hard
@ -131,7 +127,6 @@ INSERT INTO second_distributed_table
FROM copy_to_other_table;
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
DEBUG: generating subplan 20_1 for CTE copy_to_other_table: INSERT INTO with_dml.distributed_table (tenant_id, dept) SELECT tenant_id, dept FROM with_dml.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) 3) ON CONFLICT(tenant_id) DO UPDATE SET dept = 4 RETURNING distributed_table.tenant_id, distributed_table.dept
DEBUG: generating subplan 20_2 for CTE main_table_deleted: DELETE FROM with_dml.distributed_table WHERE ((dept OPERATOR(pg_catalog.<) 10) AND (NOT (EXISTS (SELECT 1 FROM with_dml.second_distributed_table WHERE ((second_distributed_table.dept OPERATOR(pg_catalog.=) 1) AND (second_distributed_table.tenant_id OPERATOR(pg_catalog.=) distributed_table.tenant_id)))))) RETURNING tenant_id, dept
DEBUG: generating subplan 20_3 for subquery SELECT main_table_deleted.tenant_id, main_table_deleted.dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) main_table_deleted EXCEPT SELECT copy_to_other_table.tenant_id, copy_to_other_table.dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) copy_to_other_table
@ -144,7 +139,6 @@ SET dept =
SELECT DISTINCT tenant_id::int FROM distributed_table
) select * from vals where tenant_id = 8 )
WHERE dept = 8;
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
DEBUG: generating subplan 24_1 for CTE vals: SELECT DISTINCT (tenant_id)::integer AS tenant_id FROM with_dml.distributed_table
DEBUG: Plan 24 query after replacing subqueries and CTEs: UPDATE with_dml.second_distributed_table SET dept = (SELECT vals.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) vals WHERE (vals.tenant_id OPERATOR(pg_catalog.=) 8)) WHERE (dept OPERATOR(pg_catalog.=) 8)
-- Subquery inside the UPDATE statement
@ -154,7 +148,6 @@ SET dept =

(SELECT DISTINCT tenant_id::int FROM distributed_table WHERE tenant_id = '9')
WHERE dept = 8;
DEBUG: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
DEBUG: generating subplan 26_1 for subquery SELECT DISTINCT (tenant_id)::integer AS tenant_id FROM with_dml.distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) '9'::text)
DEBUG: Plan 26 query after replacing subqueries and CTEs: UPDATE with_dml.second_distributed_table SET dept = (SELECT intermediate_result.tenant_id FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) WHERE (dept OPERATOR(pg_catalog.=) 8)
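A scalar subquery in SET is handled the same way: subplan 26_1 computes the DISTINCT tenant_id once, and the UPDATE embeds a read of that single-row result. Condensed from the rewritten statement above:

    UPDATE second_distributed_table
    SET dept = (
        SELECT intermediate_result.tenant_id
        FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format)
            AS intermediate_result(tenant_id integer))
    WHERE dept = 8;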
-- delete all remaining tenants
@ -162,16 +155,12 @@ WITH ids_to_delete AS (
SELECT tenant_id FROM distributed_table
)
DELETE FROM distributed_table WHERE tenant_id = ANY(SELECT tenant_id FROM ids_to_delete);
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 28_1 for CTE ids_to_delete: SELECT tenant_id FROM with_dml.distributed_table
DEBUG: Plan 28 query after replacing subqueries and CTEs: DELETE FROM with_dml.distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) ids_to_delete))
WITH ids_to_delete AS (
SELECT id FROM reference_table
)
DELETE FROM reference_table WHERE id = ANY(SELECT id FROM ids_to_delete);
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 30_1 for CTE ids_to_delete: SELECT id FROM with_dml.reference_table
DEBUG: Plan 30 query after replacing subqueries and CTEs: DELETE FROM with_dml.reference_table WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.id FROM (SELECT intermediate_result.id FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(id text)) ids_to_delete))
RESET client_min_messages;
DROP SCHEMA with_dml CASCADE;
NOTICE: drop cascades to 3 other objects
@ -35,7 +35,6 @@ BEGIN;
DELETE FROM raw_table WHERE created_at < '2014-02-10 20:00:00' AND tenant_id IN (SELECT * from ids_to_delete) RETURNING tenant_id
)
UPDATE raw_table SET income = income * 2 WHERE tenant_id IN (SELECT tenant_id FROM deleted_ids);
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 3_1 for CTE ids_to_delete: SELECT tenant_id FROM with_transactions.raw_table WHERE (income OPERATOR(pg_catalog.<) (250)::double precision)
DEBUG: generating subplan 3_2 for CTE deleted_ids: DELETE FROM with_transactions.raw_table WHERE ((created_at OPERATOR(pg_catalog.<) 'Mon Feb 10 20:00:00 2014 PST'::timestamp with time zone) AND (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_to_delete.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_to_delete))) RETURNING tenant_id
DEBUG: Plan 3 query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET income = (income OPERATOR(pg_catalog.*) (2)::double precision) WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT deleted_ids.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) deleted_ids))
@ -70,7 +69,6 @@ BEGIN;
)
UPDATE raw_table SET created_at = '2001-02-10 20:00:00'
WHERE tenant_id IN (SELECT tenant_id FROM ids_inserted) AND tenant_id < (SELECT count FROM distinct_count);
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 9_1 for CTE distinct_count: SELECT count(DISTINCT created_at) AS count FROM with_transactions.raw_table
DEBUG: generating subplan 9_2 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id, income, created_at) VALUES (11, 1000, now()) RETURNING tenant_id
DEBUG: Plan 9 query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET created_at = 'Sat Feb 10 20:00:00 2001 PST'::timestamp with time zone WHERE ((tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('9_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) AND (tenant_id OPERATOR(pg_catalog.<) (SELECT distinct_count.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) distinct_count)))
@ -82,7 +80,6 @@ WITH ids_inserted AS
INSERT INTO raw_table VALUES (11, 1000, now()), (12, 1000, now()), (13, 1000, now()) RETURNING tenant_id
)
UPDATE raw_table SET created_at = '2001-02-10 20:00:00' WHERE tenant_id IN (SELECT tenant_id FROM ids_inserted);
DEBUG: common table expressions are not supported in distributed modifications
DEBUG: generating subplan 12_1 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id, income, created_at) VALUES (11,1000,now()), (12,1000,now()), (13,1000,now()) RETURNING raw_table.tenant_id
DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET created_at = 'Sat Feb 10 20:00:00 2001 PST'::timestamp with time zone WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted))
ERROR: cannot establish a new connection for placement 800007, since DML has been executed on a connection that is in use
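The error above comes from connection management rather than planning: once the CTE's INSERT has modified shard placements over particular connections, the outer UPDATE cannot open fresh connections to those same placements within the transaction. A sketch of the failing shape, assuming the multi-row INSERT spans several shards:

    BEGIN;
    WITH ids_inserted AS (
        INSERT INTO raw_table VALUES (11, 1000, now()), (12, 1000, now())
        RETURNING tenant_id
    )
    UPDATE raw_table SET created_at = '2001-02-10 20:00:00'
    WHERE tenant_id IN (SELECT tenant_id FROM ids_inserted);
    ROLLBACK;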
@ -113,7 +110,6 @@ WITH ids_inserted AS
INSERT INTO raw_table (tenant_id) VALUES (11), (12), (13), (14) RETURNING tenant_id
)
SELECT income FROM second_raw_table WHERE tenant_id IN (SELECT * FROM ids_inserted) ORDER BY 1 DESC LIMIT 3;
DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
DEBUG: generating subplan 17_1 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id) VALUES (11), (12), (13), (14) RETURNING raw_table.tenant_id
DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT income FROM with_transactions.second_raw_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) ORDER BY income DESC LIMIT 3
DEBUG: push down of limit count: 3
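The "push down of limit count: 3" line means the LIMIT was shipped to the workers: since ORDER BY income DESC LIMIT 3 can be evaluated per shard and the coordinator only merges the per-shard top rows, each worker query carries its own LIMIT. Roughly, with a hypothetical shard name:

    -- per-shard fragment (sketch)
    SELECT income FROM with_transactions.second_raw_table_102008
    ORDER BY income DESC LIMIT 3;
    -- the coordinator merges all fragments and applies LIMIT 3 once more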
@ -111,7 +111,7 @@ GetOptions(
#
# XXX: There's some issues with el capitan's SIP here, causing
# DYLD_LIBRARY_PATH not being inherited if SIP is enabled. That's a
# know problem, present in postgres itself as well.
# known problem, present in postgres itself as well.
if (defined $libdir)
{
$ENV{LD_LIBRARY_PATH} = "$libdir:".($ENV{LD_LIBRARY_PATH} || '');
@ -559,11 +559,11 @@ sub ShutdownServers()
or warn "Could not shutdown worker server";
}
}
if ($mitmPid != 0)
{
# '-' means signal the process group, 2 is SIGINT
kill(-2, $mitmPid) or warn "could not interrupt mitmdump";
}
if ($mitmPid != 0)
{
# '-' means signal the process group, 2 is SIGINT
kill(-2, $mitmPid) or warn "could not interrupt mitmdump";
}
$serversAreShutdown = "TRUE";
}
}
@ -80,6 +80,12 @@ step "s1-select-from-t1-within-cte"
SELECT * FROM first_value;
}

step "s1-update-rt-with-cte-select-from-rt"
{
WITH foo AS (SELECT * FROM ref_table FOR UPDATE)
UPDATE ref_table SET val_1 = 4 FROM foo WHERE ref_table.id = foo.id;
}

step "s1-select-from-t1-with-subquery"
{
SELECT * FROM (SELECT * FROM test_table_1_rf1 FOR UPDATE) foo WHERE id = 1;
@ -157,3 +163,4 @@ permutation "s1-begin" "s1-select-from-t1-within-cte" "s2-begin" "s2-update-t1"
permutation "s1-begin" "s1-select-from-t1-with-subquery" "s2-begin" "s2-update-t1" "s1-finish" "s2-finish"
permutation "s1-begin" "s1-select-from-rt-with-subquery" "s2-begin" "s2-update-rt" "s1-finish" "s2-finish"
permutation "s1-begin" "s1-select-from-t1-with-view" "s2-begin" "s2-update-t1" "s1-finish" "s2-finish"
permutation "s1-begin" "s1-update-rt-with-cte-select-from-rt" "s2-begin" "s2-update-rt" "s1-finish" "s2-finish"
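The new step takes row locks through a CTE (SELECT ... FOR UPDATE) and the added permutation checks that those locks conflict with a concurrent update of the same reference table. Stripped of the isolation-tester framing, the two sessions reduce to something like this (session 2's statement is a hypothetical stand-in for s2-update-rt):

    -- session 1
    BEGIN;
    WITH foo AS (SELECT * FROM ref_table FOR UPDATE)
    UPDATE ref_table SET val_1 = 4 FROM foo WHERE ref_table.id = foo.id;

    -- session 2: blocks until session 1 commits
    UPDATE ref_table SET val_1 = 5 WHERE id = 1;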
@ -208,7 +208,7 @@ FROM
) as foo WHERE second_distributed_table.tenant_id = foo.tenant_id
RETURNING *;

-- we don't support subquerues/CTEs inside VALUES
-- we don't support subqueries/CTEs inside VALUES
INSERT INTO
second_distributed_table (tenant_id, dept)
VALUES ('3', (WITH vals AS (SELECT 3) select * from vals));
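Since subqueries and CTEs are rejected inside a VALUES list, one workaround (a sketch, not part of the test) is to fold the computed value into an INSERT ... SELECT instead:

    INSERT INTO second_distributed_table (tenant_id, dept)
    SELECT '3', v FROM (SELECT 3 AS v) AS vals;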
@ -3,9 +3,16 @@
-- ===================================================================
SET search_path TO subquery_and_ctes;

CREATE TABLE users_table_local AS SELECT * FROM users_table;

CREATE TABLE dist_table (id int, value int);
SELECT create_distributed_table('dist_table', 'id', colocate_with => 'users_table');
INSERT INTO dist_table (id, value) VALUES(1, 2),(2, 3),(3,4);

CREATE FUNCTION func() RETURNS TABLE (id int, value int) AS $$
SELECT 1, 2
$$ LANGUAGE SQL;

SET client_min_messages TO DEBUG1;
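The colocate_with => 'users_table' argument places dist_table in the same colocation group as users_table, so matching hash ranges live on the same workers; this is what lets the router planner later ship a CTE over users_table together with an UPDATE of dist_table as a single task. Colocation can be checked from the metadata, e.g.:

    SELECT logicalrelid, colocationid
    FROM pg_dist_partition
    WHERE logicalrelid IN ('users_table'::regclass, 'dist_table'::regclass);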
-- CTEs are recursively planned, and subquery foo is also recursively planned
@ -17,7 +24,7 @@ WITH cte AS (
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT
count(*)
@ -34,6 +41,58 @@ FROM
) as foo
WHERE foo.user_id = cte.user_id;

-- CTEs are colocated, route entire query
WITH cte1 AS (
SELECT * FROM users_table WHERE user_id = 1
), cte2 AS (
SELECT * FROM events_table WHERE user_id = 1
)
SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type
FROM cte1, cte2
ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type
LIMIT 5;
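Both CTEs above filter on the same distribution-column value (user_id = 1), so every relation resolves to the same shard placements and the router planner can ship the entire statement as one task, with no intermediate results. The next query breaks that property on purpose: with user_id = 1 and user_id = 6 the shards differ, so the CTEs are materialized instead. The planner's reasoning is visible at a higher debug level, e.g.:

    SET client_min_messages TO DEBUG2;  -- also shows why a query was not router-plannable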
-- CTEs aren't colocated, CTEs become intermediate results
WITH cte1 AS (
SELECT * FROM users_table WHERE user_id = 1
), cte2 AS (
SELECT * FROM events_table WHERE user_id = 6
)
SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.user_id
FROM cte1, cte2
ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type
LIMIT 5;
-- users_table & dist_table are colocated, route entire query
|
||||
WITH cte1 AS (
|
||||
SELECT * FROM users_table WHERE user_id = 1
|
||||
)
|
||||
UPDATE dist_table dt SET value = cte1.value_1
|
||||
FROM cte1 WHERE cte1.user_id = dt.id AND dt.id = 1;
|
||||
|
||||
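This UPDATE is routable for the same reason: cte1 pins users_table to user_id = 1, the WHERE clause pins dist_table to dt.id = 1, and colocation maps both keys to the same shard group, so the whole statement (CTE included) goes to one worker. The shipped task looks roughly like this, with hypothetical shard names:

    WITH cte1 AS (SELECT * FROM users_table_102008 WHERE user_id = 1)
    UPDATE dist_table_102072 dt SET value = cte1.value_1
    FROM cte1 WHERE cte1.user_id = dt.id AND dt.id = 1;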
-- users_table & events_table & dist_table are colocated, route entire query
|
||||
WITH cte1 AS (
|
||||
SELECT * FROM users_table WHERE user_id = 1
|
||||
), cte2 AS (
|
||||
SELECT * FROM events_table WHERE user_id = 1
|
||||
)
|
||||
UPDATE dist_table dt SET value = cte1.value_1 + cte2.event_type
|
||||
FROM cte1, cte2 WHERE cte1.user_id = dt.id AND dt.id = 1;
|
||||
|
||||
-- all relations are not colocated, CTEs become intermediate results
|
||||
WITH cte1 AS (
|
||||
SELECT * FROM users_table WHERE user_id = 1
|
||||
), cte2 AS (
|
||||
SELECT * FROM events_table WHERE user_id = 6
|
||||
)
|
||||
UPDATE dist_table dt SET value = cte1.value_1 + cte2.event_type
|
||||
FROM cte1, cte2 WHERE cte1.user_id = dt.id AND dt.id = 1;
|
||||
|
||||
-- volatile function calls should not be routed
WITH cte1 AS (SELECT id, value FROM func())
UPDATE dist_table dt SET value = cte1.value
FROM cte1 WHERE dt.id = 1;
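func() is a SQL function and therefore VOLATILE by default, and queries whose CTEs call volatile functions are not routed: the function's results must be computed exactly once, so the CTE becomes an intermediate result instead. The default volatility is easy to confirm:

    SELECT proname, provolatile  -- 'v' means VOLATILE, the default
    FROM pg_proc
    WHERE proname = 'func';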
-- CTEs are recursively planned, and subquery foo is also recursively planned
-- final plan becomes a real-time plan since we also have events_table in the
-- range table entries
@ -44,7 +103,7 @@ WITH cte AS (
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT
count(*)
@ -71,7 +130,7 @@ WITH cte AS (
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT DISTINCT cte.user_id
FROM users_table, cte
@ -89,7 +148,7 @@ WITH cte AS (
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT DISTINCT cte.user_id
FROM cte
@ -201,7 +260,7 @@ SELECT * FROM
dist_cte AS (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT DISTINCT cte.user_id
FROM users_table, cte
@ -233,7 +292,7 @@ WITH cte AS (
events_table.user_id = foo.value_2 AND
events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3)
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT
count(*)
@ -270,7 +329,7 @@ FROM
events_table.user_id = foo.value_2 AND
events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3)
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
SELECT dist_cte.user_id FROM local_cte JOIN dist_cte ON dist_cte.user_id=local_cte.user_id
)
SELECT
count(*) as cnt