mirror of https://github.com/citusdata/citus.git
Merge 1a20c4fe6d into 55a0d1f730
commit b4290d1ab3
@@ -897,6 +897,7 @@ AdaptiveExecutor(CitusScanState *scanState)
 
 	FinishDistributedExecution(execution);
 
+	job->jobExecuted = true;
 	if (SortReturning && distributedPlan->expectResults && commandType != CMD_SELECT)
 	{
 		SortTupleStore(scanState);
@@ -41,12 +41,17 @@ ExecuteSubPlans(DistributedPlan *distributedPlan)
 	uint64 planId = distributedPlan->planId;
 	List *subPlanList = distributedPlan->subPlanList;
 
+	if (distributedPlan->workerJob)
+		distributedPlan->workerJob->jobExecuted = true;
+
+	distributedPlan->subPlansExecuted = true;
+
 	if (subPlanList == NIL)
 	{
 		/* no subplans to execute */
 		return;
 	}
 
 
 	HTAB *intermediateResultsHash = MakeIntermediateResultHTAB();
 	RecordSubplanExecutionsOnNodes(intermediateResultsHash, distributedPlan);
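
The hunk above is the writer half of the fix: executing the plan records that the worker job and the subplans already ran, so a later EXPLAIN pass can skip re-running them. Below is a minimal, self-contained C sketch of that execute-once flag pattern; the Plan/Job structs and ExecuteSubPlansOnce are hypothetical stand-ins for illustration, not Citus code.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for Citus' DistributedPlan and Job (not the real structs) */
typedef struct Job
{
    bool jobExecuted;
} Job;

typedef struct Plan
{
    Job *workerJob;
    bool subPlansExecuted;
} Plan;

/* run the subplans at most once; repeated calls become no-ops */
static void
ExecuteSubPlansOnce(Plan *plan)
{
    if (plan->subPlansExecuted)
    {
        return; /* already executed with the top-level plan */
    }

    printf("executing subplans\n"); /* the real work would happen here */

    if (plan->workerJob)
    {
        plan->workerJob->jobExecuted = true;
    }
    plan->subPlansExecuted = true;
}

int
main(void)
{
    Job job = { false };
    Plan plan = { &job, false };

    ExecuteSubPlansOnce(&plan); /* prints once and sets both flags */
    ExecuteSubPlansOnce(&plan); /* no-op: flags are already set */
    return 0;
}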
@@ -1395,6 +1395,7 @@ FinalizePlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan)
 	PlannedStmt *finalPlan = NULL;
 	CustomScan *customScan = makeNode(CustomScan);
 	MultiExecutorType executorType = MULTI_EXECUTOR_INVALID_FIRST;
+	distributedPlan->subPlansExecuted = false;
 
 	/* this field is used in JobExecutorType */
 	distributedPlan->relationIdList = localPlan->relationOids;
@@ -189,7 +189,8 @@ typedef struct SerializeDestReceiver
 
 /* Explain functions for distributed queries */
-static void ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es);
+static void ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es,
+							bool subPlansExecuted);
 static void ExplainJob(CitusScanState *scanState, Job *job, ExplainState *es,
 					   ParamListInfo params);
 static void ExplainMapMergeJob(MapMergeJob *mapMergeJob, ExplainState *es);
@@ -296,7 +297,7 @@ CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es
 
 	if (distributedPlan->subPlanList != NIL)
 	{
-		ExplainSubPlans(distributedPlan, es);
+		ExplainSubPlans(distributedPlan, es, scanState->finishedPreScan);
 	}
 
 	ExplainJob(scanState, distributedPlan->workerJob, es, params);
@@ -434,13 +435,28 @@ NonPushableMergeCommandExplainScan(CustomScanState *node, List *ancestors,
  * planning time and set it to 0.
  */
 static void
-ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
+ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es, bool subPlansExecuted)
 {
 	ListCell *subPlanCell = NULL;
 	uint64 planId = distributedPlan->planId;
+	bool analyzeEnabled = es->analyze;
+	bool timingEnabled = es->timing;
+	bool walEnabled = es->wal;
 
 	ExplainOpenGroup("Subplans", "Subplans", false, es);
 
+	if (subPlansExecuted)
+	{
+		/*
+		 * Subplans are already executed recursively when
+		 * executing the top-level of the plan. Here, we just
+		 * need to explain them but not execute them again.
+		 */
+		es->analyze = false;
+		es->timing = false;
+		es->wal = false;
+	}
+
 	foreach(subPlanCell, distributedPlan->subPlanList)
 	{
 		DistributedSubPlan *subPlan = (DistributedSubPlan *) lfirst(subPlanCell);
@@ -488,9 +504,9 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
 
 	ExplainOpenGroup("Subplan", NULL, true, es);
 
-	if (es->analyze)
+	if (analyzeEnabled)
 	{
-		if (es->timing)
+		if (timingEnabled)
 		{
 			ExplainPropertyFloat("Subplan Duration", "ms", subPlan->durationMillisecs,
 								 2, es);
@@ -553,6 +569,14 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
 		}
 	}
 
+	/* Restore the settings */
+	if (subPlansExecuted)
+	{
+		es->analyze = analyzeEnabled;
+		es->timing = timingEnabled;
+		es->wal = walEnabled;
+	}
+
 	ExplainCloseGroup("Subplans", "Subplans", false, es);
 }
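
The hunks above use a standard save/disable/restore pattern on the ExplainState instrumentation flags: snapshot es->analyze, es->timing, and es->wal at entry, clear them while walking subplans that already executed, and restore them before returning so the caller's EXPLAIN options are untouched. A compact sketch of the same idea, using a hypothetical ExplainFlags struct rather than PostgreSQL's real ExplainState:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical subset of ExplainState's instrumentation flags */
typedef struct ExplainFlags
{
    bool analyze;
    bool timing;
    bool wal;
} ExplainFlags;

static void
ExplainOneSubPlan(ExplainFlags *es)
{
    /* stand-in for emitting one subplan's EXPLAIN output */
    printf("subplan explained (analyze=%d)\n", es->analyze);
}

static void
ExplainSubPlansSketch(ExplainFlags *es, int subPlanCount, bool subPlansExecuted)
{
    ExplainFlags saved = *es; /* save the caller's settings */

    if (subPlansExecuted)
    {
        /* explain only; never execute the subplans a second time */
        es->analyze = false;
        es->timing = false;
        es->wal = false;
    }

    for (int i = 0; i < subPlanCount; i++)
    {
        ExplainOneSubPlan(es);
    }

    *es = saved; /* restore the settings */
}

int
main(void)
{
    ExplainFlags es = { true, true, false };

    ExplainSubPlansSketch(&es, 2, true); /* prints analyze=0 twice */
    printf("caller's analyze flag restored: %d\n", es.analyze);
    return 0;
}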
@@ -578,6 +602,7 @@ static bool
 ShowReceivedTupleData(CitusScanState *scanState, ExplainState *es)
 {
 	TupleDesc tupDesc = ScanStateGetTupleDescriptor(scanState);
 
 	return es->analyze && tupDesc != NULL && tupDesc->natts > 0;
 }

@@ -600,7 +625,7 @@ ExplainJob(CitusScanState *scanState, Job *job, ExplainState *es,
 	ExplainOpenGroup("Job", "Job", true, es);
 
 	ExplainPropertyInteger("Task Count", NULL, taskCount, es);
-	if (ShowReceivedTupleData(scanState, es))
+	if (ShowReceivedTupleData(scanState, es) || job->jobExecuted)
 	{
 		Task *task = NULL;
 		uint64 totalReceivedTupleDataForAllTasks = 0;
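
Reading the last hunk: ShowReceivedTupleData() requires es->analyze, so once subplan explains run with analyze cleared, the received-tuple block would be skipped; the added || job->jobExecuted disjunct appears intended to keep emitting the per-task statistics for a job that did execute during the real run.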
@@ -1692,6 +1692,7 @@ CreateJob(Query *query)
 	job->subqueryPushdown = false;
 	job->requiresCoordinatorEvaluation = false;
 	job->deferredPruning = false;
+	job->jobExecuted = false;
 
 	return job;
 }
@@ -1791,6 +1792,7 @@ CreateTask(TaskType taskType)
 	task->modifyWithSubquery = false;
 	task->partiallyLocalOrRemote = false;
 	task->relationShardList = NIL;
+	task->taskCompleted = false;
 
 	return task;
 }
@@ -153,6 +153,7 @@ typedef struct Job
 	 */
 	bool parametersInJobQueryResolved;
 	uint32 colocationId; /* common colocation group ID of the relations */
+	bool jobExecuted;
 } Job;

@@ -334,6 +335,7 @@ typedef struct Task
 
 	Const *partitionKeyValue;
 	int colocationId;
+	bool taskCompleted;
 } Task;

@@ -471,6 +473,7 @@ typedef struct DistributedPlan
 	 * of source rows to be repartitioned for colocation with the target.
 	 */
 	int sourceResultRepartitionColumnIndex;
+	bool subPlansExecuted;
 } DistributedPlan;
@@ -330,20 +330,18 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distribute
 EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF)
 WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table)
 SELECT 1 FROM r WHERE z < 3;
 QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 40 bytes
         Result destination: Write locally
-        ->  Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 22 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 22 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Seq Scan on distributed_table_1470001 distributed_table (actual rows=1 loops=1)
+                   ->  Seq Scan on distributed_table_1470001 distributed_table
   Task Count: 1
   Tuple data received from nodes: 4 bytes
   Tasks Shown: All

@@ -352,7 +350,7 @@ SELECT 1 FROM r WHERE z < 3;
   Node: host=localhost port=xxxxx dbname=regression
   ->  Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
         Filter: (z < '3'::double precision)
-(20 rows)
+(18 rows)
 
 EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20;
 QUERY PLAN
@@ -268,20 +268,18 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distribute
 EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF)
 WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table)
 SELECT 1 FROM r WHERE z < 3;
 QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 40 bytes
         Result destination: Write locally
-        ->  Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 22 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 22 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Seq Scan on distributed_table_1500001 distributed_table (actual rows=1 loops=1)
+                   ->  Seq Scan on distributed_table_1500001 distributed_table
   Task Count: 1
   Tuple data received from nodes: 4 bytes
   Tasks Shown: All

@@ -290,7 +288,7 @@ SELECT 1 FROM r WHERE z < 3;
   Node: host=localhost port=xxxxx dbname=regression
   ->  Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
         Filter: (z < '3'::double precision)
-(20 rows)
+(18 rows)
 
 EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20;
 QUERY PLAN
@@ -385,10 +385,9 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 14 bytes
         Result destination: Write locally
-        ->  Aggregate (actual rows=1 loops=1)
-              ->  Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+        ->  Aggregate
+              ->  Custom Scan (Citus Adaptive)
                     Task Count: 6
-                    Tuple data received from nodes: 48 bytes
                     Tasks Shown: None, not supported for re-partition queries
                     ->  MapMergeJob
                           Map Task Count: 3
@@ -2390,14 +2389,12 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 220 bytes
         Result destination: Send to 3 nodes
-        ->  Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 120 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 48 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+                   ->  Seq Scan on dist_table_570017 dist_table
   Task Count: 1
   Tuple data received from nodes: 8 bytes
   Tasks Shown: All
@@ -2447,23 +2444,19 @@ Aggregate (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 70 bytes
         Result destination: Send to 2 nodes
-        ->  Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 10 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 4 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Merge Join (actual rows=4 loops=1)
+                   ->  Merge Join
                          Merge Cond: (dist_table.a = ref_table.a)
-                         ->  Sort (actual rows=4 loops=1)
+                         ->  Sort
                                Sort Key: dist_table.a
-                               Sort Method: quicksort Memory: 25kB
-                               ->  Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
-                         ->  Sort (actual rows=10 loops=1)
+                               ->  Seq Scan on dist_table_570017 dist_table
+                         ->  Sort
                                Sort Key: ref_table.a
-                               Sort Method: quicksort Memory: 25kB
-                               ->  Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+                               ->  Seq Scan on ref_table_570021 ref_table
   Task Count: 4
   Tuple data received from nodes: 32 bytes
   Tasks Shown: One of 4
@@ -2492,27 +2485,23 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 100 bytes
         Result destination: Write locally
-        ->  Custom Scan (Citus Adaptive) (actual rows=20 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 160 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 64 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Insert on dist_table_570017 citus_table_alias (actual rows=8 loops=1)
-                         ->  Seq Scan on dist_table_570017 dist_table (actual rows=8 loops=1)
+                   ->  Insert on dist_table_570017 citus_table_alias
+                         ->  Seq Scan on dist_table_570017 dist_table
                                Filter: (a IS NOT NULL)
   ->  Distributed Subplan XXX_2
         Intermediate Data Size: 150 bytes
         Result destination: Write locally
-        ->  Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 1
-             Tuple data received from nodes: 50 bytes
              Tasks Shown: All
              ->  Task
-                   Tuple data received from node: 50 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+                   ->  Function Scan on read_intermediate_result intermediate_result
   Task Count: 1
   Tuple data received from nodes: 8 bytes
   Tasks Shown: All
@@ -3006,17 +2995,14 @@ Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 0 bytes
         Result destination: Send to 0 nodes
-        ->  Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 0 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 0 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Delete on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
-                         ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
+                   ->  Delete on lineitem_hash_part_360041 lineitem_hash_part
+                         ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
                                Filter: (l_quantity < '-1'::numeric)
-                               Rows Removed by Filter: 2885
   Task Count: 1
   Tuple data received from nodes: 40 bytes
   Tasks Shown: All
@@ -3138,13 +3124,13 @@ Limit (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 14 bytes
         Result destination: Send to 2 nodes
-        ->  WindowAgg (actual rows=1 loops=1)
-              ->  Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+        ->  WindowAgg
+              ->  Custom Scan (Citus Adaptive)
                     Task Count: 2
                     Tasks Shown: One of 2
                     ->  Task
                          Node: host=localhost port=xxxxx dbname=regression
-                         ->  Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
+                         ->  Seq Scan on distributed_table_1_570032 distributed_table_1
   Task Count: 2
   Tuple data received from nodes: 16 bytes
   Tasks Shown: One of 2
@@ -3240,5 +3226,34 @@ set auto_explain.log_analyze to true;
 -- the following should not be locally executed since explain analyze is on
 select * from test_ref_table;
 DROP SCHEMA test_auto_explain CASCADE;
+SET search_path TO multi_explain;
+-- EXPLAIN ANALYZE shouldn't execute SubPlans twice (bug #4212)
+CREATE TABLE test_subplans (x int primary key, y int);
+SELECT create_distributed_table('test_subplans','x');
+
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
+WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
+SELECT * FROM a;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+  ->  Distributed Subplan XXX_1
+        Intermediate Data Size: 18 bytes
+        Result destination: Write locally
+        ->  Custom Scan (Citus Adaptive)
+             Task Count: 1
+             Tasks Shown: All
+             ->  Task
+                   Node: host=localhost port=xxxxx dbname=regression
+                   ->  Insert on test_subplans_570039
+                         ->  Result
+  Task Count: 1
+  Tuple data received from nodes: 8 bytes
+  Tasks Shown: All
+  ->  Task
+        Tuple data received from node: 8 bytes
+        Node: host=localhost port=xxxxx dbname=regression
+        ->  Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
+-- Only one row must exist
+SELECT * FROM test_subplans;
+1|2
 SET client_min_messages TO ERROR;
 DROP SCHEMA multi_explain CASCADE;
@@ -385,10 +385,9 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 14 bytes
         Result destination: Write locally
-        ->  Aggregate (actual rows=1 loops=1)
-              ->  Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+        ->  Aggregate
+              ->  Custom Scan (Citus Adaptive)
                     Task Count: 6
-                    Tuple data received from nodes: 48 bytes
                     Tasks Shown: None, not supported for re-partition queries
                     ->  MapMergeJob
                           Map Task Count: 3
@@ -2390,14 +2389,12 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 220 bytes
         Result destination: Send to 3 nodes
-        ->  Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 120 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 48 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+                   ->  Seq Scan on dist_table_570017 dist_table
   Task Count: 1
   Tuple data received from nodes: 8 bytes
   Tasks Shown: All
@@ -2442,23 +2439,19 @@ Aggregate (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 70 bytes
         Result destination: Send to 2 nodes
-        ->  Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 10 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 4 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Merge Join (actual rows=4 loops=1)
+                   ->  Merge Join
                          Merge Cond: (dist_table.a = ref_table.a)
-                         ->  Sort (actual rows=4 loops=1)
+                         ->  Sort
                                Sort Key: dist_table.a
-                               Sort Method: quicksort Memory: 25kB
-                               ->  Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
-                         ->  Sort (actual rows=10 loops=1)
+                               ->  Seq Scan on dist_table_570017 dist_table
+                         ->  Sort
                                Sort Key: ref_table.a
-                               Sort Method: quicksort Memory: 25kB
-                               ->  Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+                               ->  Seq Scan on ref_table_570021 ref_table
   Task Count: 4
   Tuple data received from nodes: 32 bytes
   Tasks Shown: One of 4
@@ -2484,27 +2477,23 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 100 bytes
         Result destination: Write locally
-        ->  Custom Scan (Citus Adaptive) (actual rows=20 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 160 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 64 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Insert on dist_table_570017 citus_table_alias (actual rows=8 loops=1)
-                         ->  Seq Scan on dist_table_570017 dist_table (actual rows=8 loops=1)
+                   ->  Insert on dist_table_570017 citus_table_alias
+                         ->  Seq Scan on dist_table_570017 dist_table
                                Filter: (a IS NOT NULL)
   ->  Distributed Subplan XXX_2
         Intermediate Data Size: 150 bytes
        Result destination: Write locally
-        ->  Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 1
-             Tuple data received from nodes: 50 bytes
              Tasks Shown: All
              ->  Task
-                   Tuple data received from node: 50 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+                   ->  Function Scan on read_intermediate_result intermediate_result
   Task Count: 1
   Tuple data received from nodes: 8 bytes
   Tasks Shown: All
@@ -2995,17 +2984,14 @@ Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 0 bytes
         Result destination: Send to 0 nodes
-        ->  Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
-             Tuple data received from nodes: 0 bytes
              Tasks Shown: One of 4
              ->  Task
-                   Tuple data received from node: 0 bytes
                    Node: host=localhost port=xxxxx dbname=regression
-                   ->  Delete on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
-                         ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
+                   ->  Delete on lineitem_hash_part_360041 lineitem_hash_part
+                         ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
                                Filter: (l_quantity < '-1'::numeric)
-                               Rows Removed by Filter: 2885
   Task Count: 1
   Tuple data received from nodes: 40 bytes
   Tasks Shown: All
@@ -3127,13 +3113,13 @@ Limit (actual rows=1 loops=1)
   ->  Distributed Subplan XXX_1
         Intermediate Data Size: 14 bytes
         Result destination: Send to 2 nodes
-        ->  WindowAgg (actual rows=1 loops=1)
-              ->  Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+        ->  WindowAgg
+              ->  Custom Scan (Citus Adaptive)
                     Task Count: 2
                     Tasks Shown: One of 2
                     ->  Task
                          Node: host=localhost port=xxxxx dbname=regression
-                         ->  Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
+                         ->  Seq Scan on distributed_table_1_570032 distributed_table_1
   Task Count: 2
   Tuple data received from nodes: 16 bytes
   Tasks Shown: One of 2
@@ -3229,5 +3215,34 @@ set auto_explain.log_analyze to true;
 -- the following should not be locally executed since explain analyze is on
 select * from test_ref_table;
 DROP SCHEMA test_auto_explain CASCADE;
+SET search_path TO multi_explain;
+-- EXPLAIN ANALYZE shouldn't execute SubPlans twice (bug #4212)
+CREATE TABLE test_subplans (x int primary key, y int);
+SELECT create_distributed_table('test_subplans','x');
+
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
+WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
+SELECT * FROM a;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+  ->  Distributed Subplan XXX_1
+        Intermediate Data Size: 18 bytes
+        Result destination: Write locally
+        ->  Custom Scan (Citus Adaptive)
+             Task Count: 1
+             Tasks Shown: All
+             ->  Task
+                   Node: host=localhost port=xxxxx dbname=regression
+                   ->  Insert on test_subplans_570039
+                         ->  Result
+  Task Count: 1
+  Tuple data received from nodes: 8 bytes
+  Tasks Shown: All
+  ->  Task
+        Tuple data received from node: 8 bytes
+        Node: host=localhost port=xxxxx dbname=regression
+        ->  Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
+-- Only one row must exist
+SELECT * FROM test_subplans;
+1|2
 SET client_min_messages TO ERROR;
 DROP SCHEMA multi_explain CASCADE;
@@ -721,13 +721,11 @@ CALL exec_query_and_check_query_counters($$
 	0, 0
 );
 -- same with explain analyze
---
--- this time, query_execution_multi_shard is incremented twice because of #4212
 CALL exec_query_and_check_query_counters($$
 	EXPLAIN (ANALYZE)
 	SELECT * FROM (SELECT * FROM dist_table OFFSET 0) q
 	$$,
-	1, 2
+	1, 1
 );
 CALL exec_query_and_check_query_counters($$
 	DELETE FROM dist_table WHERE a = 1
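
In other words, the expected counters drop from (1, 2) to (1, 1): with the double execution fixed, the multi-shard subplan behind OFFSET 0 runs once under EXPLAIN (ANALYZE), so query_execution_multi_shard is incremented once rather than twice.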
@@ -1041,9 +1039,6 @@ PL/pgSQL function exec_query_and_check_query_counters(text,bigint,bigint) line X
 -- A similar one but without the insert, so we would normally expect 2 increments
 -- for query_execution_single_shard and 2 for query_execution_multi_shard instead
 -- of 3 since the insert is not there anymore.
---
--- But this time we observe more counter increments because we execute the subplans
--- twice because of #4212.
 CALL exec_query_and_check_query_counters($$
 	EXPLAIN (ANALYZE)
 	-- single-shard subplan (whole cte)
@@ -1057,7 +1052,7 @@ CALL exec_query_and_check_query_counters($$
 	FROM (SELECT * FROM dist_table_1 ORDER BY a LIMIT 16) q -- multi-shard subplan (subquery q)
 	JOIN cte ON q.a = cte.a
 	$$,
-	3, 4
+	2, 2
 );
 -- safe to push-down
 CALL exec_query_and_check_query_counters($$
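
Here the expectation (3, 4) becomes (2, 2), matching the "normally expect 2 and 2" reasoning in the comment above: the single-shard cte and the multi-shard subquery q each now execute exactly once under EXPLAIN (ANALYZE) instead of twice.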
@@ -1182,5 +1182,15 @@ select * from test_ref_table;
 
 DROP SCHEMA test_auto_explain CASCADE;
 
+SET search_path TO multi_explain;
+-- EXPLAIN ANALYZE shouldn't execute SubPlans twice (bug #4212)
+CREATE TABLE test_subplans (x int primary key, y int);
+SELECT create_distributed_table('test_subplans','x');
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
+WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
+SELECT * FROM a;
+-- Only one row must exist
+SELECT * FROM test_subplans;
+
 SET client_min_messages TO ERROR;
 DROP SCHEMA multi_explain CASCADE;
@@ -476,13 +476,11 @@ CALL exec_query_and_check_query_counters($$
 );
 
 -- same with explain analyze
---
--- this time, query_execution_multi_shard is incremented twice because of #4212
 CALL exec_query_and_check_query_counters($$
 	EXPLAIN (ANALYZE)
 	SELECT * FROM (SELECT * FROM dist_table OFFSET 0) q
 	$$,
-	1, 2
+	1, 1
 );
 
 CALL exec_query_and_check_query_counters($$

@@ -807,9 +805,6 @@ CALL exec_query_and_check_query_counters($$
 -- A similar one but without the insert, so we would normally expect 2 increments
 -- for query_execution_single_shard and 2 for query_execution_multi_shard instead
 -- of 3 since the insert is not there anymore.
---
--- But this time we observe more counter increments because we execute the subplans
--- twice because of #4212.
 CALL exec_query_and_check_query_counters($$
 	EXPLAIN (ANALYZE)
 	-- single-shard subplan (whole cte)

@@ -823,7 +818,7 @@ CALL exec_query_and_check_query_counters($$
 	FROM (SELECT * FROM dist_table_1 ORDER BY a LIMIT 16) q -- multi-shard subplan (subquery q)
 	JOIN cte ON q.a = cte.a
 	$$,
-	3, 4
+	2, 2
 );
 
 -- safe to push-down