mirror of https://github.com/citusdata/citus.git
Merge pull request #1974 from citusdata/remove_broadcast_pr2_v2
Adds colocation check to local join
pull/2060/head
commit aa0aea9840
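In short, the diff below threads an "anchor" table through join-order planning so that LocalJoin() can verify co-partitioning before producing a local join. A condensed sketch of the resulting API shape, assembled from the struct and signature hunks that follow (abridged for illustration, not compilable on its own; the leading struct fields are inferred from the assignments in MakeJoinOrderNode()):

	/* Condensed from the hunks below; abridged for illustration. */
	typedef struct JoinOrderNode
	{
		TableEntry *tableEntry;
		JoinRuleType joinRuleType;
		Var *partitionColumn;
		char partitionMethod;
		List *joinClauseList;      /* not relevant for the first table */
		List *shardIntervalList;
		TableEntry *anchorTable;   /* new: the table co-location is checked against */
	} JoinOrderNode;

	/* MakeJoinOrderNode() gains the anchor table as a fifth argument */
	JoinOrderNode * MakeJoinOrderNode(TableEntry *tableEntry, JoinRuleType joinRuleType,
									  Var *partitionColumn, char partitionMethod,
									  TableEntry *anchorTable);

	/* CoPartitionedTables() is promoted from static so the join-order logic can call it */
	extern bool CoPartitionedTables(Oid firstRelationId, Oid secondRelationId);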
@@ -103,7 +103,8 @@ static JoinOrderNode * CartesianProduct(JoinOrderNode *joinNode,
 										 JoinType joinType);
 static JoinOrderNode * MakeJoinOrderNode(TableEntry *tableEntry, JoinRuleType
 										 joinRuleType, Var *partitionColumn,
-										 char partitionMethod);
+										 char partitionMethod,
+										 TableEntry *anchorTable);
 
 
 /*
@@ -295,7 +296,8 @@ CreateFirstJoinOrderNode(FromExpr *fromExpr, List *tableEntryList)
 
 	firstJoinNode = MakeJoinOrderNode(firstTable, firstJoinRule,
 									  firstPartitionColumn,
-									  firstPartitionMethod);
+									  firstPartitionMethod,
+									  firstTable);
 
 	firstJoinNode->shardIntervalList = LoadShardIntervalList(firstTable->relationId);
 
@@ -671,7 +673,8 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause
 
 	JoinOrderNode *firstJoinNode = MakeJoinOrderNode(firstTable, firstJoinRule,
 													 firstPartitionColumn,
-													 firstPartitionMethod);
+													 firstPartitionMethod,
+													 firstTable);
 
 	/* add first node to the join order */
 	joinOrderList = list_make1(firstJoinNode);
@@ -1195,7 +1198,8 @@ BroadcastJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 	{
 		nextJoinNode = MakeJoinOrderNode(candidateTable, BROADCAST_JOIN,
 										 currentJoinNode->partitionColumn,
-										 currentJoinNode->partitionMethod);
+										 currentJoinNode->partitionMethod,
+										 currentJoinNode->anchorTable);
 	}
 
 	return nextJoinNode;
@@ -1208,6 +1212,11 @@ BroadcastJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
  * then evaluates if tables in the join order and the candidate table can be
  * joined locally, without any data transfers. If they can, the function returns
  * a join order node for a local join. Otherwise, the function returns null.
+ *
+ * Anchor table is used to decide whether the JoinOrderNode can be joined
+ * locally with the candidate table. That table is updated by each join type
+ * applied over JoinOrderNode. Note that, we lost the anchor table after
+ * dual partitioning and cartesian product.
  */
 static JoinOrderNode *
 LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
@@ -1221,7 +1230,22 @@ LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 	Var *currentPartitionColumn = currentJoinNode->partitionColumn;
 	char candidatePartitionMethod = PartitionMethod(relationId);
 	char currentPartitionMethod = currentJoinNode->partitionMethod;
+	TableEntry *currentAnchorTable = currentJoinNode->anchorTable;
+	JoinRuleType currentJoinRuleType = currentJoinNode->joinRuleType;
 	bool joinOnPartitionColumns = false;
+	bool coPartitionedTables = false;
+
+	/*
+	 * If we previously dual-hash re-partitioned the tables for a join or made
+	 * cartesian product, we currently don't allow local join.
+	 */
+	if (currentJoinRuleType == DUAL_PARTITION_JOIN ||
+		currentJoinRuleType == CARTESIAN_PRODUCT)
+	{
+		return NULL;
+	}
+
+	Assert(currentAnchorTable != NULL);
 
 	/* the partition method should be the same for a local join */
 	if (currentPartitionMethod != candidatePartitionMethod)
@@ -1232,13 +1256,25 @@ LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 	joinOnPartitionColumns = JoinOnColumns(currentPartitionColumn,
 										   candidatePartitionColumn,
 										   applicableJoinClauses);
-	if (joinOnPartitionColumns)
+	if (!joinOnPartitionColumns)
 	{
-		nextJoinNode = MakeJoinOrderNode(candidateTable, LOCAL_PARTITION_JOIN,
-										 currentPartitionColumn,
-										 currentPartitionMethod);
+		return NULL;
 	}
 
+	/* shard interval lists must have 1-1 matching for local joins */
+	coPartitionedTables = CoPartitionedTables(currentAnchorTable->relationId, relationId);
+
+	if (!coPartitionedTables)
+	{
+		return NULL;
+	}
+
+	nextJoinNode = MakeJoinOrderNode(candidateTable, LOCAL_PARTITION_JOIN,
+									 currentPartitionColumn,
+									 currentPartitionMethod,
+									 currentAnchorTable);
+
+
 	return nextJoinNode;
 }
 
@@ -1258,6 +1294,7 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 	JoinOrderNode *nextJoinNode = NULL;
 	Var *currentPartitionColumn = currentJoinNode->partitionColumn;
 	char currentPartitionMethod = currentJoinNode->partitionMethod;
+	TableEntry *currentAnchorTable = currentJoinNode->anchorTable;
 
 	Oid relationId = candidateTable->relationId;
 	uint32 tableId = candidateTable->rangeTableId;
@@ -1288,7 +1325,8 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 		{
 			nextJoinNode = MakeJoinOrderNode(candidateTable, SINGLE_PARTITION_JOIN,
 											 currentPartitionColumn,
-											 currentPartitionMethod);
+											 currentPartitionMethod,
+											 currentAnchorTable);
 		}
 	}
 
@@ -1303,7 +1341,8 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 		{
 			nextJoinNode = MakeJoinOrderNode(candidateTable, SINGLE_PARTITION_JOIN,
 											 candidatePartitionColumn,
-											 candidatePartitionMethod);
+											 candidatePartitionMethod,
+											 candidateTable);
 		}
 	}
 
@@ -1364,6 +1403,8 @@ DualPartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 				  List *candidateShardList, List *applicableJoinClauses,
 				  JoinType joinType)
 {
+	/* Because of the dual partition, anchor table information got lost */
+	TableEntry *anchorTable = NULL;
 	JoinOrderNode *nextJoinNode = NULL;
 
 	OpExpr *joinClause = DualPartitionJoinClause(applicableJoinClauses);
@@ -1371,7 +1412,8 @@ DualPartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 	{
 		Var *nextPartitionColumn = LeftColumn(joinClause);
 		nextJoinNode = MakeJoinOrderNode(candidateTable, DUAL_PARTITION_JOIN,
-										 nextPartitionColumn, REDISTRIBUTE_BY_HASH);
+										 nextPartitionColumn, REDISTRIBUTE_BY_HASH,
+										 anchorTable);
 	}
 
 	return nextJoinNode;
@@ -1421,9 +1463,13 @@ CartesianProduct(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 				 List *candidateShardList, List *applicableJoinClauses,
 				 JoinType joinType)
 {
+	/* Because of the cartesian product, anchor table information got lost */
+	TableEntry *anchorTable = NULL;
+
 	JoinOrderNode *nextJoinNode = MakeJoinOrderNode(candidateTable, CARTESIAN_PRODUCT,
 													currentJoinNode->partitionColumn,
-													currentJoinNode->partitionMethod);
+													currentJoinNode->partitionMethod,
+													anchorTable);
 
 	return nextJoinNode;
 }
@@ -1432,7 +1478,7 @@ CartesianProduct(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 /* Constructs and returns a join-order node with the given arguments */
 JoinOrderNode *
 MakeJoinOrderNode(TableEntry *tableEntry, JoinRuleType joinRuleType,
-				  Var *partitionColumn, char partitionMethod)
+				  Var *partitionColumn, char partitionMethod, TableEntry *anchorTable)
 {
 	JoinOrderNode *joinOrderNode = palloc0(sizeof(JoinOrderNode));
 	joinOrderNode->tableEntry = tableEntry;
@@ -1441,6 +1487,7 @@ MakeJoinOrderNode(TableEntry *tableEntry, JoinRuleType joinRuleType,
 	joinOrderNode->partitionColumn = partitionColumn;
 	joinOrderNode->partitionMethod = partitionMethod;
 	joinOrderNode->joinClauseList = NIL;
+	joinOrderNode->anchorTable = anchorTable;
 
 	return joinOrderNode;
 }
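Each join rule above hands a different anchor to MakeJoinOrderNode(); the propagation rule can be summarized as below. NextAnchorTable() and its partitionedOnCandidate flag are illustrative helpers only, not functions from the diff, which encodes the same decision inline at each call site:

	/*
	 * Illustrative summary of how the hunks above propagate the anchor table;
	 * not a function from the diff.
	 */
	static TableEntry *
	NextAnchorTable(JoinOrderNode *currentNode, TableEntry *candidateTable,
					JoinRuleType appliedRule, bool partitionedOnCandidate)
	{
		switch (appliedRule)
		{
			case LOCAL_PARTITION_JOIN:
			case BROADCAST_JOIN:
				/* the current anchor survives unchanged */
				return currentNode->anchorTable;

			case SINGLE_PARTITION_JOIN:
				/* the side that keeps its partitioning anchors the join */
				return partitionedOnCandidate ? candidateTable :
					   currentNode->anchorTable;

			case DUAL_PARTITION_JOIN:
			case CARTESIAN_PRODUCT:
				/* anchor information is lost; later LocalJoin() calls bail out */
				return NULL;

			default:
				return NULL;
		}
	}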
@@ -127,7 +127,6 @@ static Job * BuildJobTreeTaskList(Job *jobTree,
 static List * SubquerySqlTaskList(Job *job,
 								  PlannerRestrictionContext *plannerRestrictionContext);
 static void ErrorIfUnsupportedShardDistribution(Query *query);
-static bool CoPartitionedTables(Oid firstRelationId, Oid secondRelationId);
 static bool ShardIntervalsEqual(FmgrInfo *comparisonFunction,
 								ShardInterval *firstInterval,
 								ShardInterval *secondInterval);
@@ -2228,9 +2227,13 @@ ErrorIfUnsupportedShardDistribution(Query *query)
 
 /*
  * CoPartitionedTables checks if given two distributed tables have 1-to-1 shard
- * partitioning.
+ * placement matching. It first checks for the shard count, if tables don't have
+ * same amount shard then it returns false. Note that, if any table does not
+ * have any shard, it returns true. If two tables have same amount of shards,
+ * we check colocationIds for hash distributed tables and shardInterval's min
+ * max values for append and range distributed tables.
  */
-static bool
+bool
 CoPartitionedTables(Oid firstRelationId, Oid secondRelationId)
 {
 	bool coPartitionedTables = true;
@@ -2267,6 +2270,20 @@ CoPartitionedTables(Oid firstRelationId, Oid secondRelationId)
 		return true;
 	}
 
+	/*
+	 * For hash distributed tables two tables are accepted as colocated only if
+	 * they have the same colocationId. Otherwise they may have same minimum and
+	 * maximum values for each shard interval, yet hash function may result with
+	 * different values for the same value. int vs bigint can be given as an
+	 * example.
+	 */
+	if (firstTableCache->partitionMethod == DISTRIBUTE_BY_HASH ||
+		secondTableCache->partitionMethod == DISTRIBUTE_BY_HASH)
+	{
+		return false;
+	}
+
+
 	/*
 	 * If not known to be colocated check if the remaining shards are
 	 * anyway. Do so by comparing the shard interval arrays that are sorted on
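The comment above spells out the check order; a condensed sketch of that decision tree follows. The helpers (shardCount(), isHashDistributed(), sameColocationId(), intervalsMatch()) are illustrative stand-ins rather than Citus functions, and the colocation-id short-circuit that the real function performs earlier is folded into the hash branch here:

	/* Illustrative condensation of the checks described above; helper names
	 * are stand-ins, not Citus APIs. */
	static bool
	CoPartitionedTablesSketch(Oid firstRelationId, Oid secondRelationId)
	{
		int firstCount = shardCount(firstRelationId);
		int secondCount = shardCount(secondRelationId);

		/* a table with no shards is trivially co-partitioned */
		if (firstCount == 0 || secondCount == 0)
		{
			return true;
		}

		/* shard counts must match exactly for 1-to-1 placement matching */
		if (firstCount != secondCount)
		{
			return false;
		}

		if (isHashDistributed(firstRelationId) || isHashDistributed(secondRelationId))
		{
			/*
			 * Equal shard min/max values are not enough for hash tables: the
			 * hash function can differ (e.g. int vs bigint), so only a shared
			 * colocation id counts.
			 */
			return sameColocationId(firstRelationId, secondRelationId);
		}

		/* append/range: compare the sorted shard interval min/max values 1-to-1 */
		return intervalsMatch(firstRelationId, secondRelationId);
	}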
@@ -70,6 +70,7 @@ typedef struct JoinOrderNode
 	char partitionMethod;
 	List *joinClauseList; /* not relevant for the first table */
 	List *shardIntervalList;
+	TableEntry *anchorTable;
 } JoinOrderNode;
 
 
@@ -317,6 +317,7 @@ extern Const * MakeInt4Constant(Datum constantValue);
 extern int CompareShardPlacements(const void *leftElement, const void *rightElement);
 extern bool ShardIntervalsOverlap(ShardInterval *firstInterval,
 								  ShardInterval *secondInterval);
+extern bool CoPartitionedTables(Oid firstRelationId, Oid secondRelationId);
 
 /* function declarations for Task and Task list operations */
 extern bool TasksEqual(const Task *a, const Task *b);
@@ -0,0 +1,7 @@
+11|Customer#000000011|PkWS 3HlXqwTuzrKg633BEi|23|33-464-151-3439|-272.60|BUILDING|ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans. 
+12|Customer#000000012|9PWKuhzT4Zr1Q|13|23-791-276-1263|3396.49|HOUSEHOLD| to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along
+14|Customer#000000014|KXkletMlL2JQEA |1|11-845-129-3851|5266.30|FURNITURE|, ironic packages across the unus
+16|Customer#000000016|cYiaeMLZSMAOQ2 d0W,|10|20-781-609-3107|4681.03|FURNITURE|kly silent courts. thinly regular theodolites sleep fluffily after 
+17|Customer#000000017|izrh 6jdqtp2eqdtbkswDD8SG4SzXruMfIXyR7|2|12-970-682-3487|6.34|AUTOMOBILE|packages wake! blithely even pint
+18|Customer#000000018|3txGO AiuFux3zT0Z9NYaFRnZt|6|16-155-215-1315|5494.43|BUILDING|s sleep. carefully even instructions nag furiously alongside of t
+20|Customer#000000020|JrPk8Pqplj4Ne|22|32-957-234-8742|7603.40|FURNITURE|g alongside of the special excuses-- fluffily enticing packages wake
@@ -0,0 +1,7 @@
+21|Customer#000000021|XYmVpr9yAHDEn|8|18-902-614-8344|1428.25|MACHINERY| quickly final accounts integrate blithely furiously u
+22|Customer#000000022|QI6p41,FNs5k7RZoCCVPUTkUdYpB|3|13-806-545-9701|591.98|MACHINERY|s nod furiously above the furiously ironic ideas. 
+24|Customer#000000024|HXAFgIAyjxtdqwimt13Y3OZO 4xeLe7U8PqG|13|23-127-851-8031|9255.67|MACHINERY|into beans. fluffily final ideas haggle fluffily
+26|Customer#000000026|8ljrc5ZeMl7UciP|22|32-363-455-4837|5182.05|AUTOMOBILE|c requests use furiously ironic requests. slyly ironic dependencies us
+27|Customer#000000027|IS8GIyxpBrLpMT0u7|3|13-137-193-2709|5679.84|BUILDING| about the carefully ironic pinto beans. accoun
+28|Customer#000000028|iVyg0daQ,Tha8x2WPWA9m2529m|8|18-774-241-1462|1007.18|FURNITURE| along the regular deposits. furiously final pac
+30|Customer#000000030|nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY|1|11-764-165-5076|9321.01|BUILDING|lithely final requests. furiously unusual account
@@ -1484,3 +1484,4 @@
 14944|535|O|119586.69|1997-10-14|2-HIGH|Clerk#000000962|0|lly. even instructions against
 14945|68|O|210519.05|1996-03-30|1-URGENT|Clerk#000000467|0|nts? fluffily bold grouches after
 14946|580|O|100402.47|1996-11-12|1-URGENT|Clerk#000000116|0|ffily bold dependencies wake. furiously regular instructions aro
+14947|580|O|100402.47|1996-11-12|1-URGENT|Clerk#000000116|0|ffily bold dependencies wake. furiously regular instructions aro
@@ -17,7 +17,7 @@ step s1-master_append_table_to_shard:
 
 master_append_table_to_shard
 
-0.213333
+0.0426667
 step s2-master_append_table_to_shard:
 
 	SELECT
@@ -33,7 +33,7 @@ step s1-commit:
 step s2-master_append_table_to_shard: <... completed>
 master_append_table_to_shard
 
-0.32
+0.064
 step s2-commit:
 	COMMIT;
 
@@ -206,7 +206,7 @@ SELECT count(*) FROM orders
 	WHERE o_comment IS NOT null;
  count 
 -------
-  2984
+  2985
 (1 row)
 
 -- functions can be pushed down
@@ -431,11 +431,12 @@ SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt
 SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980;
  o_custkey 
 -----------
       1498
+      1498
       1499
       1499
       1499
-(4 rows)
+(5 rows)
 
 -- LIMIT/OFFSET with Joins
 SELECT
@@ -206,7 +206,7 @@ SELECT count(*) FROM orders
 	WHERE o_comment IS NOT null;
  count 
 -------
-  2984
+  2985
 (1 row)
 
 -- functions can be pushed down
@@ -417,11 +417,12 @@ SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt
 SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980;
  o_custkey 
 -----------
       1498
+      1498
       1499
       1499
       1499
-(4 rows)
+(5 rows)
 
 -- LIMIT/OFFSET with Joins
 SELECT
@@ -2,6 +2,7 @@
 -- MULTI_CREATE_TABLE
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100000;
 -- Create new table definitions for use in testing in distributed planning and
 -- execution functionality. Also create indexes to boost performance.
 CREATE TABLE lineitem (
@@ -48,7 +48,7 @@ SELECT * FROM customer LIMIT 2;
 SELECT count(*) FROM customer, orders WHERE c_custkey = o_custkey;
  count 
 -------
-  1955
+  1956
 (1 row)
 
 -- Test joinExpr aliases by performing an outer-join. This code path is
@@ -47,8 +47,8 @@ Sort
   ->  HashAggregate
         Group Key: remote_scan.l_quantity
         ->  Custom Scan (Citus Real-Time)
-              Task Count: 8
-              Tasks Shown: One of 8
+              Task Count: 2
+              Tasks Shown: One of 2
               ->  Task
                     Node: host=localhost port=57637 dbname=regression
                     ->  HashAggregate
@@ -66,8 +66,8 @@ Sort
   ->  Sort
         Sort Key: remote_scan.l_quantity
         ->  Custom Scan (Citus Real-Time)
-              Task Count: 8
-              Tasks Shown: One of 8
+              Task Count: 2
+              Tasks Shown: One of 2
               ->  Task
                     Node: host=localhost port=57637 dbname=regression
                     ->  HashAggregate
@@ -100,8 +100,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
         "Parallel Aware": false,
         "Distributed Query": {
           "Job": {
-            "Task Count": 8,
-            "Tasks Shown": "One of 8",
+            "Task Count": 2,
+            "Tasks Shown": "One of 2",
             "Tasks": [
               {
                 "Node": "host=localhost port=57637 dbname=regression",
@@ -175,8 +175,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
         <Parallel-Aware>false</Parallel-Aware>
         <Distributed-Query>
           <Job>
-            <Task-Count>8</Task-Count>
-            <Tasks-Shown>One of 8</Tasks-Shown>
+            <Task-Count>2</Task-Count>
+            <Tasks-Shown>One of 2</Tasks-Shown>
             <Tasks>
               <Task>
                 <Node>host=localhost port=57637 dbname=regression</Node>
@@ -245,8 +245,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
         Parallel Aware: false
         Distributed Query:
           Job:
-            Task Count: 8
-            Tasks Shown: "One of 8"
+            Task Count: 2
+            Tasks Shown: "One of 2"
             Tasks:
               - Node: "host=localhost port=57637 dbname=regression"
                 Remote Plan:
@@ -273,8 +273,8 @@ Sort
   ->  HashAggregate
         Group Key: remote_scan.l_quantity
         ->  Custom Scan (Citus Real-Time)
-              Task Count: 8
-              Tasks Shown: One of 8
+              Task Count: 2
+              Tasks Shown: One of 2
               ->  Task
                     Node: host=localhost port=57637 dbname=regression
                     ->  HashAggregate
@@ -287,8 +287,8 @@ Aggregate
   Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
   ->  Custom Scan (Citus Real-Time)
         Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
-        Task Count: 8
-        Tasks Shown: One of 8
+        Task Count: 2
+        Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=57637 dbname=regression
               ->  Aggregate
@@ -304,20 +304,19 @@ Limit
   ->  Sort
         Sort Key: remote_scan.l_quantity
         ->  Custom Scan (Citus Real-Time)
-              Task Count: 8
-              Tasks Shown: One of 8
+              Task Count: 2
+              Tasks Shown: One of 2
               ->  Task
                     Node: host=localhost port=57637 dbname=regression
                     ->  Limit
                           ->  Sort
                                 Sort Key: lineitem.l_quantity
-                                ->  Merge Join
-                                      Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-                                      ->  Index Scan using orders_pkey_290008 on orders_290008 orders
-                                      ->  Sort
-                                            Sort Key: lineitem.l_orderkey
-                                            ->  Seq Scan on lineitem_290001 lineitem
-                                                  Filter: (l_quantity < 5.0)
+                                ->  Hash Join
+                                      Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
+                                      ->  Seq Scan on lineitem_290001 lineitem
+                                            Filter: (l_quantity < 5.0)
+                                      ->  Hash
+                                            ->  Seq Scan on orders_290003 orders
 -- Test insert
 EXPLAIN (COSTS FALSE)
 	INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0);
@@ -391,8 +390,8 @@ EXPLAIN (COSTS FALSE)
 	CREATE TABLE explain_result AS
 	SELECT * FROM lineitem;
 Custom Scan (Citus Real-Time)
-  Task Count: 8
-  Tasks Shown: One of 8
+  Task Count: 2
+  Tasks Shown: One of 2
   ->  Task
         Node: host=localhost port=57637 dbname=regression
         ->  Seq Scan on lineitem_290001 lineitem
@@ -405,8 +404,8 @@ Aggregate
   Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
   ->  Custom Scan (Citus Real-Time)
         Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
-        Task Count: 8
-        Tasks Shown: One of 8
+        Task Count: 2
+        Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=57637 dbname=regression
               ->  Aggregate
@@ -424,8 +423,8 @@ HashAggregate
   Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
   ->  Custom Scan (Citus Real-Time)
         Output: remote_scan.l_quantity, remote_scan.worker_column_2
-        Task Count: 8
-        Tasks Shown: One of 8
+        Task Count: 2
+        Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=57637 dbname=regression
               ->  HashAggregate
@@ -740,27 +739,12 @@ EXPLAIN (COSTS FALSE)
 	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
 Aggregate
   ->  Custom Scan (Citus Real-Time)
-        Task Count: 4
+        Task Count: 1
         Tasks Shown: All
         ->  Task
               Node: host=localhost port=57637 dbname=regression
               ->  Aggregate
-                    ->  Seq Scan on lineitem_290005 lineitem
-                          Filter: (l_orderkey > 9030)
-        ->  Task
-              Node: host=localhost port=57638 dbname=regression
-              ->  Aggregate
-                    ->  Seq Scan on lineitem_290004 lineitem
-                          Filter: (l_orderkey > 9030)
-        ->  Task
-              Node: host=localhost port=57637 dbname=regression
-              ->  Aggregate
-                    ->  Seq Scan on lineitem_290007 lineitem
-                          Filter: (l_orderkey > 9030)
-        ->  Task
-              Node: host=localhost port=57638 dbname=regression
-              ->  Aggregate
-                    ->  Seq Scan on lineitem_290006 lineitem
+                    ->  Seq Scan on lineitem_290001 lineitem
                           Filter: (l_orderkey > 9030)
 SELECT true AS valid FROM explain_xml($$
 	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
@@ -839,12 +823,12 @@ EXPLAIN (COSTS FALSE)
 	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
 Aggregate
   ->  Custom Scan (Citus Task-Tracker)
-        Task Count: 4
-        Tasks Shown: One of 4
+        Task Count: 1
+        Tasks Shown: All
         ->  Task
               Node: host=localhost port=57637 dbname=regression
              ->  Aggregate
-                    ->  Seq Scan on lineitem_290005 lineitem
+                    ->  Seq Scan on lineitem_290001 lineitem
                           Filter: (l_orderkey > 9030)
 -- Test re-partition join
 SET citus.large_table_shard_count TO 1;
@@ -862,7 +846,7 @@ Aggregate
               Map Task Count: 1
               Merge Task Count: 1
               ->  MapMergeJob
-                    Map Task Count: 8
+                    Map Task Count: 2
                     Merge Task Count: 1
 EXPLAIN (COSTS FALSE, FORMAT JSON)
 	SELECT count(*)
@@ -893,7 +877,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
                 "Merge Task Count": 1,
                 "Depended Jobs": [
                   {
-                    "Map Task Count": 8,
+                    "Map Task Count": 2,
                     "Merge Task Count": 1
                   }
                 ]
@@ -942,7 +926,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
                 <Merge-Task-Count>1</Merge-Task-Count>
                 <Depended-Jobs>
                   <MapMergeJob>
-                    <Map-Task-Count>8</Map-Task-Count>
+                    <Map-Task-Count>2</Map-Task-Count>
                     <Merge-Task-Count>1</Merge-Task-Count>
                   </MapMergeJob>
                 </Depended-Jobs>
@@ -999,7 +983,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
           - Map Task Count: 1
             Merge Task Count: 1
             Depended Jobs:
-              - Map Task Count: 8
+              - Map Task Count: 2
                 Merge Task Count: 1
 -- test parallel aggregates
 SET parallel_setup_cost=0;
@@ -1020,8 +1004,8 @@ Finalize Aggregate
 EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem;
 Aggregate
   ->  Custom Scan (Citus Task-Tracker)
-        Task Count: 8
-        Tasks Shown: One of 8
+        Task Count: 2
+        Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=57637 dbname=regression
               ->  Aggregate
@@ -1032,12 +1016,12 @@ PREPARE task_tracker_query AS
 EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query;
 Aggregate
   ->  Custom Scan (Citus Task-Tracker)
-        Task Count: 4
-        Tasks Shown: One of 4
+        Task Count: 1
+        Tasks Shown: All
         ->  Task
               Node: host=localhost port=57637 dbname=regression
              ->  Aggregate
-                    ->  Seq Scan on lineitem_290005 lineitem
+                    ->  Seq Scan on lineitem_290001 lineitem
                           Filter: (l_orderkey > 9030)
 SET citus.task_executor_type TO 'real-time';
 PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
@@ -1047,19 +1031,19 @@ Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0)
   Tasks Shown: All
   ->  Task
         Node: host=localhost port=57637 dbname=regression
-        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5)
+        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 width=5)
               Index Cond: (l_orderkey = 5)
 PREPARE real_time_executor_query AS
 	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
 EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query;
 Aggregate
   ->  Custom Scan (Citus Real-Time)
-        Task Count: 4
-        Tasks Shown: One of 4
+        Task Count: 1
+        Tasks Shown: All
         ->  Task
               Node: host=localhost port=57637 dbname=regression
              ->  Aggregate
-                    ->  Seq Scan on lineitem_290005 lineitem
+                    ->  Seq Scan on lineitem_290001 lineitem
                           Filter: (l_orderkey > 9030)
 -- EXPLAIN EXECUTE of parametrized prepared statements is broken, but
 -- at least make sure to fail without crashing
@@ -1070,7 +1054,7 @@ Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0)
   Tasks Shown: All
   ->  Task
         Node: host=localhost port=57637 dbname=regression
-        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5)
+        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 width=5)
               Index Cond: (l_orderkey = 5)
 -- test explain in a transaction with alter table to test we use right connections
 BEGIN;
@@ -47,8 +47,8 @@ Sort
   ->  HashAggregate
         Group Key: remote_scan.l_quantity
         ->  Custom Scan (Citus Real-Time)
-              Task Count: 8
-              Tasks Shown: One of 8
+              Task Count: 2
+              Tasks Shown: One of 2
               ->  Task
                     Node: host=localhost port=57637 dbname=regression
                     ->  HashAggregate
@@ -66,8 +66,8 @@ Sort
   ->  Sort
         Sort Key: remote_scan.l_quantity
         ->  Custom Scan (Citus Real-Time)
-              Task Count: 8
-              Tasks Shown: One of 8
+              Task Count: 2
+              Tasks Shown: One of 2
               ->  Task
                     Node: host=localhost port=57637 dbname=regression
                     ->  HashAggregate
@@ -100,8 +100,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
         "Parallel Aware": false,
         "Distributed Query": {
          "Job": {
-            "Task Count": 8,
-            "Tasks Shown": "One of 8",
+            "Task Count": 2,
+            "Tasks Shown": "One of 2",
             "Tasks": [
               {
                 "Node": "host=localhost port=57637 dbname=regression",
@@ -175,8 +175,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
         <Parallel-Aware>false</Parallel-Aware>
         <Distributed-Query>
           <Job>
-            <Task-Count>8</Task-Count>
-            <Tasks-Shown>One of 8</Tasks-Shown>
+            <Task-Count>2</Task-Count>
+            <Tasks-Shown>One of 2</Tasks-Shown>
             <Tasks>
               <Task>
                 <Node>host=localhost port=57637 dbname=regression</Node>
@@ -245,8 +245,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
         Parallel Aware: false
         Distributed Query:
           Job:
-            Task Count: 8
-            Tasks Shown: "One of 8"
+            Task Count: 2
+            Tasks Shown: "One of 2"
             Tasks:
               - Node: "host=localhost port=57637 dbname=regression"
                 Remote Plan:
@@ -273,8 +273,8 @@ Sort
   ->  HashAggregate
         Group Key: remote_scan.l_quantity
         ->  Custom Scan (Citus Real-Time)
-              Task Count: 8
-              Tasks Shown: One of 8
+              Task Count: 2
+              Tasks Shown: One of 2
               ->  Task
                     Node: host=localhost port=57637 dbname=regression
                     ->  HashAggregate
@@ -287,8 +287,8 @@ Aggregate
   Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
   ->  Custom Scan (Citus Real-Time)
         Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
-        Task Count: 8
-        Tasks Shown: One of 8
+        Task Count: 2
+        Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=57637 dbname=regression
               ->  Aggregate
@@ -304,20 +304,19 @@ Limit
   ->  Sort
         Sort Key: remote_scan.l_quantity
         ->  Custom Scan (Citus Real-Time)
-              Task Count: 8
-              Tasks Shown: One of 8
+              Task Count: 2
+              Tasks Shown: One of 2
               ->  Task
                     Node: host=localhost port=57637 dbname=regression
                     ->  Limit
                           ->  Sort
                                 Sort Key: lineitem.l_quantity
-                                ->  Merge Join
-                                      Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-                                      ->  Index Scan using orders_pkey_290008 on orders_290008 orders
-                                      ->  Sort
-                                            Sort Key: lineitem.l_orderkey
-                                            ->  Seq Scan on lineitem_290001 lineitem
-                                                  Filter: (l_quantity < 5.0)
+                                ->  Hash Join
+                                      Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
+                                      ->  Seq Scan on lineitem_290001 lineitem
+                                            Filter: (l_quantity < 5.0)
+                                      ->  Hash
+                                            ->  Seq Scan on orders_290003 orders
 -- Test insert
 EXPLAIN (COSTS FALSE)
 	INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0);
@@ -391,8 +390,8 @@ EXPLAIN (COSTS FALSE)
 	CREATE TABLE explain_result AS
 	SELECT * FROM lineitem;
 Custom Scan (Citus Real-Time)
-  Task Count: 8
-  Tasks Shown: One of 8
+  Task Count: 2
+  Tasks Shown: One of 2
   ->  Task
         Node: host=localhost port=57637 dbname=regression
         ->  Seq Scan on lineitem_290001 lineitem
@@ -405,8 +404,8 @@ Aggregate
   Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
   ->  Custom Scan (Citus Real-Time)
         Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
-        Task Count: 8
-        Tasks Shown: One of 8
+        Task Count: 2
+        Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=57637 dbname=regression
               ->  Aggregate
@@ -424,8 +423,8 @@ HashAggregate
   Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
   ->  Custom Scan (Citus Real-Time)
         Output: remote_scan.l_quantity, remote_scan.worker_column_2
-        Task Count: 8
-        Tasks Shown: One of 8
+        Task Count: 2
+        Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=57637 dbname=regression
               ->  HashAggregate
@@ -740,27 +739,12 @@ EXPLAIN (COSTS FALSE)
 	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
 Aggregate
   ->  Custom Scan (Citus Real-Time)
-        Task Count: 4
+        Task Count: 1
         Tasks Shown: All
         ->  Task
               Node: host=localhost port=57637 dbname=regression
              ->  Aggregate
-                    ->  Seq Scan on lineitem_290005 lineitem
-                          Filter: (l_orderkey > 9030)
-        ->  Task
-              Node: host=localhost port=57638 dbname=regression
-              ->  Aggregate
-                    ->  Seq Scan on lineitem_290004 lineitem
-                          Filter: (l_orderkey > 9030)
-        ->  Task
-              Node: host=localhost port=57637 dbname=regression
-              ->  Aggregate
-                    ->  Seq Scan on lineitem_290007 lineitem
-                          Filter: (l_orderkey > 9030)
-        ->  Task
-              Node: host=localhost port=57638 dbname=regression
-              ->  Aggregate
-                    ->  Seq Scan on lineitem_290006 lineitem
+                    ->  Seq Scan on lineitem_290001 lineitem
                           Filter: (l_orderkey > 9030)
 SELECT true AS valid FROM explain_xml($$
 	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
@@ -839,12 +823,12 @@ EXPLAIN (COSTS FALSE)
 	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
 Aggregate
   ->  Custom Scan (Citus Task-Tracker)
-        Task Count: 4
-        Tasks Shown: One of 4
+        Task Count: 1
+        Tasks Shown: All
         ->  Task
               Node: host=localhost port=57637 dbname=regression
              ->  Aggregate
-                    ->  Seq Scan on lineitem_290005 lineitem
+                    ->  Seq Scan on lineitem_290001 lineitem
                           Filter: (l_orderkey > 9030)
 -- Test re-partition join
 SET citus.large_table_shard_count TO 1;
@@ -862,7 +846,7 @@ Aggregate
               Map Task Count: 1
               Merge Task Count: 1
               ->  MapMergeJob
-                    Map Task Count: 8
+                    Map Task Count: 2
                     Merge Task Count: 1
 EXPLAIN (COSTS FALSE, FORMAT JSON)
 	SELECT count(*)
@@ -893,7 +877,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
                 "Merge Task Count": 1,
                 "Depended Jobs": [
                   {
-                    "Map Task Count": 8,
+                    "Map Task Count": 2,
                     "Merge Task Count": 1
                   }
                 ]
@@ -942,7 +926,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
                 <Merge-Task-Count>1</Merge-Task-Count>
                 <Depended-Jobs>
                   <MapMergeJob>
-                    <Map-Task-Count>8</Map-Task-Count>
+                    <Map-Task-Count>2</Map-Task-Count>
                     <Merge-Task-Count>1</Merge-Task-Count>
                   </MapMergeJob>
                 </Depended-Jobs>
@@ -999,7 +983,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
           - Map Task Count: 1
             Merge Task Count: 1
            Depended Jobs:
-              - Map Task Count: 8
+              - Map Task Count: 2
                 Merge Task Count: 1
 -- test parallel aggregates
 SET parallel_setup_cost=0;
@@ -1020,8 +1004,8 @@ Finalize Aggregate
 EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem;
 Aggregate
   ->  Custom Scan (Citus Task-Tracker)
-        Task Count: 8
-        Tasks Shown: One of 8
+        Task Count: 2
+        Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=57637 dbname=regression
               ->  Aggregate
@@ -1032,12 +1016,12 @@ PREPARE task_tracker_query AS
 EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query;
 Aggregate
   ->  Custom Scan (Citus Task-Tracker)
-        Task Count: 4
-        Tasks Shown: One of 4
+        Task Count: 1
+        Tasks Shown: All
         ->  Task
               Node: host=localhost port=57637 dbname=regression
              ->  Aggregate
-                    ->  Seq Scan on lineitem_290005 lineitem
+                    ->  Seq Scan on lineitem_290001 lineitem
                           Filter: (l_orderkey > 9030)
 SET citus.task_executor_type TO 'real-time';
 PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
@@ -1047,19 +1031,19 @@ Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0)
   Tasks Shown: All
   ->  Task
         Node: host=localhost port=57637 dbname=regression
-        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5)
+        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 width=5)
              Index Cond: (l_orderkey = 5)
 PREPARE real_time_executor_query AS
 	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
 EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query;
 Aggregate
   ->  Custom Scan (Citus Real-Time)
-        Task Count: 4
-        Tasks Shown: One of 4
+        Task Count: 1
+        Tasks Shown: All
         ->  Task
              Node: host=localhost port=57637 dbname=regression
              ->  Aggregate
-                    ->  Seq Scan on lineitem_290005 lineitem
+                    ->  Seq Scan on lineitem_290001 lineitem
                           Filter: (l_orderkey > 9030)
 -- EXPLAIN EXECUTE of parametrized prepared statements is broken, but
 -- at least make sure to fail without crashing
@@ -1070,7 +1054,7 @@ Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0)
   Tasks Shown: All
   ->  Task
         Node: host=localhost port=57637 dbname=regression
-        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5)
+        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 width=5)
              Index Cond: (l_orderkey = 5)
 -- test explain in a transaction with alter table to test we use right connections
 BEGIN;
@@ -3,6 +3,8 @@
 --
 -- Tests for shard and join pruning logic on hash partitioned tables.
 SET citus.next_shard_id TO 630000;
+SET citus.shard_count to 4;
+SET citus.shard_replication_factor to 1;
 -- Create a table partitioned on integer column and update partition type to
 -- hash. Then load data into this table and update shard min max values with
 -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
@@ -17,15 +19,9 @@ CREATE TABLE orders_hash_partitioned (
 	o_clerk char(15),
 	o_shippriority integer,
 	o_comment varchar(79) );
-SELECT master_create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'hash');
- master_create_distributed_table
---------------------------------- 
-
-(1 row)
-
-SELECT master_create_worker_shards('orders_hash_partitioned', 4, 1);
- master_create_worker_shards
------------------------------
+SELECT create_distributed_table('orders_hash_partitioned', 'o_orderkey');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -59,8 +59,8 @@ EXPLAIN (COSTS FALSE)
         Group Key: remote_scan.l_orderkey
         Filter: (sum(remote_scan.worker_column_3) > '24'::numeric)
         ->  Custom Scan (Citus Real-Time)
-              Task Count: 8
-              Tasks Shown: One of 8
+              Task Count: 2
+              Tasks Shown: One of 2
               ->  Task
                     Node: host=localhost port=57637 dbname=regression
                     ->  HashAggregate
@@ -7,6 +7,8 @@ SET citus.explain_distributed_queries TO off;
 SET citus.log_multi_join_order TO TRUE;
 SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise
 SET client_min_messages TO DEBUG2;
+SET citus.shard_count to 2;
+SET citus.shard_replication_factor to 1;
 -- Create new table definitions for use in testing in distributed planning and
 -- execution functionality. Also create indexes to boost performance.
 CREATE TABLE lineitem_hash (
@@ -29,15 +31,9 @@ CREATE TABLE lineitem_hash (
 	PRIMARY KEY(l_orderkey, l_linenumber) );
 DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "lineitem_hash_pkey" for table "lineitem_hash"
 DEBUG: building index "lineitem_hash_pkey" on table "lineitem_hash"
-SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
- master_create_distributed_table
---------------------------------- 
-
-(1 row)
-
-SELECT master_create_worker_shards('lineitem_hash', 2, 1);
- master_create_worker_shards
------------------------------
+SELECT create_distributed_table('lineitem_hash', 'l_orderkey');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -56,15 +52,9 @@ CREATE TABLE orders_hash (
 	PRIMARY KEY(o_orderkey) );
 DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "orders_hash_pkey" for table "orders_hash"
 DEBUG: building index "orders_hash_pkey" on table "orders_hash"
-SELECT master_create_distributed_table('orders_hash', 'o_orderkey', 'hash');
- master_create_distributed_table
--------------------------------- 
-
-(1 row)
-
-SELECT master_create_worker_shards('orders_hash', 2, 1);
- master_create_worker_shards
------------------------------
+SELECT create_distributed_table('orders_hash', 'o_orderkey');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -77,66 +67,18 @@ CREATE TABLE customer_hash (
 	c_acctbal decimal(15,2) not null,
 	c_mktsegment char(10) not null,
 	c_comment varchar(117) not null);
-SELECT master_create_distributed_table('customer_hash', 'c_custkey', 'hash');
- master_create_distributed_table
--------------------------------- 
-
-(1 row)
-
-SELECT master_create_worker_shards('customer_hash', 2, 1);
- master_create_worker_shards
------------------------------
+SELECT create_distributed_table('customer_hash', 'c_custkey');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
 -- The following query checks that we can correctly handle self-joins
 EXPLAIN SELECT l1.l_quantity FROM lineitem l1, lineitem l2
 	WHERE l1.l_orderkey = l2.l_orderkey AND l1.l_quantity > 5;
-LOG: join order: [ "lineitem" ][ local partition join "lineitem" ]
-DEBUG: join prunable for intervals [1,1509] and [2951,4455]
-DEBUG: join prunable for intervals [1,1509] and [4480,5986]
-DEBUG: join prunable for intervals [1,1509] and [8997,10560]
-DEBUG: join prunable for intervals [1,1509] and [10560,12036]
-DEBUG: join prunable for intervals [1,1509] and [12036,13473]
-DEBUG: join prunable for intervals [1,1509] and [13473,14947]
-DEBUG: join prunable for intervals [1509,4964] and [8997,10560]
-DEBUG: join prunable for intervals [1509,4964] and [10560,12036]
-DEBUG: join prunable for intervals [1509,4964] and [12036,13473]
-DEBUG: join prunable for intervals [1509,4964] and [13473,14947]
-DEBUG: join prunable for intervals [2951,4455] and [1,1509]
-DEBUG: join prunable for intervals [2951,4455] and [4480,5986]
-DEBUG: join prunable for intervals [2951,4455] and [8997,10560]
-DEBUG: join prunable for intervals [2951,4455] and [10560,12036]
-DEBUG: join prunable for intervals [2951,4455] and [12036,13473]
-DEBUG: join prunable for intervals [2951,4455] and [13473,14947]
-DEBUG: join prunable for intervals [4480,5986] and [1,1509]
-DEBUG: join prunable for intervals [4480,5986] and [2951,4455]
-DEBUG: join prunable for intervals [4480,5986] and [8997,10560]
-DEBUG: join prunable for intervals [4480,5986] and [10560,12036]
-DEBUG: join prunable for intervals [4480,5986] and [12036,13473]
-DEBUG: join prunable for intervals [4480,5986] and [13473,14947]
-DEBUG: join prunable for intervals [8997,10560] and [1,1509]
-DEBUG: join prunable for intervals [8997,10560] and [1509,4964]
-DEBUG: join prunable for intervals [8997,10560] and [2951,4455]
-DEBUG: join prunable for intervals [8997,10560] and [4480,5986]
-DEBUG: join prunable for intervals [8997,10560] and [12036,13473]
-DEBUG: join prunable for intervals [8997,10560] and [13473,14947]
-DEBUG: join prunable for intervals [10560,12036] and [1,1509]
-DEBUG: join prunable for intervals [10560,12036] and [1509,4964]
-DEBUG: join prunable for intervals [10560,12036] and [2951,4455]
-DEBUG: join prunable for intervals [10560,12036] and [4480,5986]
-DEBUG: join prunable for intervals [10560,12036] and [13473,14947]
-DEBUG: join prunable for intervals [12036,13473] and [1,1509]
-DEBUG: join prunable for intervals [12036,13473] and [1509,4964]
-DEBUG: join prunable for intervals [12036,13473] and [2951,4455]
-DEBUG: join prunable for intervals [12036,13473] and [4480,5986]
-DEBUG: join prunable for intervals [12036,13473] and [8997,10560]
-DEBUG: join prunable for intervals [13473,14947] and [1,1509]
-DEBUG: join prunable for intervals [13473,14947] and [1509,4964]
-DEBUG: join prunable for intervals [13473,14947] and [2951,4455]
-DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
-DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
-DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
+LOG: join order: [ "lineitem" ][ broadcast join "lineitem" ]
+DEBUG: join prunable for intervals [1,5986] and [8997,14947]
+DEBUG: join prunable for intervals [8997,14947] and [1,5986]
 QUERY PLAN
 --------------------------------------------------------------------
 Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
@@ -10,28 +10,19 @@ SET client_min_messages TO DEBUG2;
 SET citus.large_table_shard_count TO 2;
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 	WHERE l_orderkey = o_orderkey;
-DEBUG: join prunable for intervals [1,1509] and [8997,14946]
-DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
-DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
-DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
-DEBUG: join prunable for intervals [8997,10560] and [1,5986]
-DEBUG: join prunable for intervals [10560,12036] and [1,5986]
-DEBUG: join prunable for intervals [12036,13473] and [1,5986]
-DEBUG: join prunable for intervals [13473,14947] and [1,5986]
+DEBUG: join prunable for intervals [1,5986] and [8997,14947]
+DEBUG: join prunable for intervals [8997,14947] and [1,5986]
   sum  |        avg         
--------+--------------------
- 36086 | 3.0076679446574429
+ 36089 | 3.0074166666666667
 (1 row)
 
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 	WHERE l_orderkey = o_orderkey AND l_orderkey > 9030;
-DEBUG: join prunable for intervals [8997,10560] and [1,5986]
-DEBUG: join prunable for intervals [10560,12036] and [1,5986]
-DEBUG: join prunable for intervals [12036,13473] and [1,5986]
-DEBUG: join prunable for intervals [13473,14947] and [1,5986]
+DEBUG: join prunable for intervals [8997,14947] and [1,5986]
   sum  |        avg         
--------+--------------------
- 17996 | 3.0194630872483221
+ 17999 | 3.0189533713518953
 (1 row)
 
 -- Shards for the lineitem table have been pruned away. Check that join pruning
@@ -48,10 +39,6 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 -- out all the shards, and leave us with an empty task list.
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 	WHERE l_orderkey = o_orderkey AND l_orderkey > 6000 AND o_orderkey < 6000;
-DEBUG: join prunable for intervals [8997,10560] and [1,5986]
-DEBUG: join prunable for intervals [10560,12036] and [1,5986]
-DEBUG: join prunable for intervals [12036,13473] and [1,5986]
-DEBUG: join prunable for intervals [13473,14947] and [1,5986]
  sum | avg 
 -----+-----
     | 
@ -41,48 +41,24 @@ GROUP BY
|
|||
l_partkey, o_orderkey
|
||||
ORDER BY
|
||||
l_partkey, o_orderkey;
|
||||
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [1509,4964] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
|
||||
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
|
||||
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
|
||||
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
|
||||
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
|
||||
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
|
||||
DEBUG: generated sql query for task 1
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290002 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DEBUG: generated sql query for task 2
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DEBUG: generated sql query for task 3
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DEBUG: generated sql query for task 4
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DEBUG: generated sql query for task 5
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DEBUG: generated sql query for task 6
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DEBUG: generated sql query for task 7
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DEBUG: generated sql query for task 8
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290003 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
|
||||
DEBUG: assigned task 2 to node localhost:57637
|
||||
DEBUG: assigned task 1 to node localhost:57638
|
||||
DEBUG: assigned task 4 to node localhost:57637
|
||||
DEBUG: assigned task 3 to node localhost:57638
|
||||
DEBUG: assigned task 6 to node localhost:57637
|
||||
DEBUG: assigned task 5 to node localhost:57638
|
||||
DEBUG: assigned task 8 to node localhost:57637
|
||||
DEBUG: assigned task 7 to node localhost:57638
|
||||
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
|
||||
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
|
||||
DEBUG: generated sql query for task 2
|
||||
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000009".intermediate_column_1_0, "pg_merge_job_0001.task_000009".intermediate_column_1_1, "pg_merge_job_0001.task_000009".intermediate_column_1_2, "pg_merge_job_0001.task_000009".intermediate_column_1_3, "pg_merge_job_0001.task_000009".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000009 "pg_merge_job_0001.task_000009" JOIN part_290011 part ON (("pg_merge_job_0001.task_000009".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
|
||||
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000003".intermediate_column_1_0, "pg_merge_job_0001.task_000003".intermediate_column_1_1, "pg_merge_job_0001.task_000003".intermediate_column_1_2, "pg_merge_job_0001.task_000003".intermediate_column_1_3, "pg_merge_job_0001.task_000003".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000003 "pg_merge_job_0001.task_000003" JOIN part_290005 part ON (("pg_merge_job_0001.task_000003".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
|
||||
DEBUG: generated sql query for task 4
|
||||
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000018".intermediate_column_1_0, "pg_merge_job_0001.task_000018".intermediate_column_1_1, "pg_merge_job_0001.task_000018".intermediate_column_1_2, "pg_merge_job_0001.task_000018".intermediate_column_1_3, "pg_merge_job_0001.task_000018".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000018 "pg_merge_job_0001.task_000018" JOIN part_280002 part ON (("pg_merge_job_0001.task_000018".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
|
||||
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000006".intermediate_column_1_0, "pg_merge_job_0001.task_000006".intermediate_column_1_1, "pg_merge_job_0001.task_000006".intermediate_column_1_2, "pg_merge_job_0001.task_000006".intermediate_column_1_3, "pg_merge_job_0001.task_000006".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000006 "pg_merge_job_0001.task_000006" JOIN part_280002 part ON (("pg_merge_job_0001.task_000006".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
|
||||
DEBUG: pruning merge fetch taskId 1
|
||||
DETAIL: Creating dependency on merge taskId 9
|
||||
DETAIL: Creating dependency on merge taskId 3
|
||||
DEBUG: pruning merge fetch taskId 3
|
||||
DETAIL: Creating dependency on merge taskId 18
|
||||
DETAIL: Creating dependency on merge taskId 6
|
||||
DEBUG: assigned task 2 to node localhost:57637
|
||||
DEBUG: assigned task 4 to node localhost:57638
|
||||
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
|
||||
|
@ -92,7 +68,7 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
|
|||
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
|
||||
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
|
||||
DEBUG: generated sql query for task 2
|
||||
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000005".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1"
|
||||
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN customer_290004 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000005".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1"
|
||||
DEBUG: generated sql query for task 4
|
||||
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000008".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000008".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000008 "pg_merge_job_0002.task_000008" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000008".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000008".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000008".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000008".intermediate_column_2_0, "pg_merge_job_0002.task_000008".intermediate_column_2_1"
|
||||
DEBUG: generated sql query for task 6
|
||||
|
@ -160,30 +136,12 @@ DEBUG: generated sql query for task 1
|
|||
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 5
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 7
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE (o_totalprice <> 4.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for task partitionId 0 and 1
@ -199,27 +157,27 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 = "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000003".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000003 "pg_merge_job_0004.task_000003" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000003".intermediate_column_4_1 = "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000003".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000018".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000018 "pg_merge_job_0004.task_000018" JOIN pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000018".intermediate_column_4_1 = "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000018".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000006".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000006 "pg_merge_job_0004.task_000006" JOIN pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000006".intermediate_column_4_1 = "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000006".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000027".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000027 "pg_merge_job_0004.task_000027" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000027".intermediate_column_4_1 = "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000027".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 = "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000036".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000036 "pg_merge_job_0004.task_000036" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000036".intermediate_column_4_1 = "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000036".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000012".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000012 "pg_merge_job_0004.task_000012" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000012".intermediate_column_4_1 = "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000012".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 18
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 36
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: assigned task 3 to node localhost:57637

@ -49,48 +49,24 @@ GROUP BY
ORDER BY
l_partkey, o_orderkey;
DEBUG: StartTransactionCommand
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
DEBUG: join prunable for intervals [1509,4964] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290002 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 5
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 7
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290003 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000009".intermediate_column_1_0, "pg_merge_job_0001.task_000009".intermediate_column_1_1, "pg_merge_job_0001.task_000009".intermediate_column_1_2, "pg_merge_job_0001.task_000009".intermediate_column_1_3, "pg_merge_job_0001.task_000009".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000009 "pg_merge_job_0001.task_000009" JOIN part_290011 part ON (("pg_merge_job_0001.task_000009".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000003".intermediate_column_1_0, "pg_merge_job_0001.task_000003".intermediate_column_1_1, "pg_merge_job_0001.task_000003".intermediate_column_1_2, "pg_merge_job_0001.task_000003".intermediate_column_1_3, "pg_merge_job_0001.task_000003".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000003 "pg_merge_job_0001.task_000003" JOIN part_290005 part ON (("pg_merge_job_0001.task_000003".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000018".intermediate_column_1_0, "pg_merge_job_0001.task_000018".intermediate_column_1_1, "pg_merge_job_0001.task_000018".intermediate_column_1_2, "pg_merge_job_0001.task_000018".intermediate_column_1_3, "pg_merge_job_0001.task_000018".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000018 "pg_merge_job_0001.task_000018" JOIN part_280002 part ON (("pg_merge_job_0001.task_000018".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000006".intermediate_column_1_0, "pg_merge_job_0001.task_000006".intermediate_column_1_1, "pg_merge_job_0001.task_000006".intermediate_column_1_2, "pg_merge_job_0001.task_000006".intermediate_column_1_3, "pg_merge_job_0001.task_000006".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000006 "pg_merge_job_0001.task_000006" JOIN part_280002 part ON (("pg_merge_job_0001.task_000006".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 18
DETAIL: Creating dependency on merge taskId 6
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
@ -100,7 +76,7 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000005".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1"
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN customer_290004 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000005".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000008".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000008".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000008 "pg_merge_job_0002.task_000008" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000008".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000008".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000008".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000008".intermediate_column_2_0, "pg_merge_job_0002.task_000008".intermediate_column_2_1"
DEBUG: generated sql query for task 6
@ -170,30 +146,12 @@ DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 5
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 7
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE (o_totalprice <> 4.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for task partitionId 0 and 1
@ -209,27 +167,27 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 = "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000003".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000003 "pg_merge_job_0004.task_000003" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000003".intermediate_column_4_1 = "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000003".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000018".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000018 "pg_merge_job_0004.task_000018" JOIN pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000018".intermediate_column_4_1 = "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000018".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000006".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000006 "pg_merge_job_0004.task_000006" JOIN pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000006".intermediate_column_4_1 = "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000006".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000027".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000027 "pg_merge_job_0004.task_000027" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000027".intermediate_column_4_1 = "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000027".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 = "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000036".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000036 "pg_merge_job_0004.task_000036" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000036".intermediate_column_4_1 = "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000036".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000012".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000012 "pg_merge_job_0004.task_000012" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000012".intermediate_column_4_1 = "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000012".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 18
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 36
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: assigned task 3 to node localhost:57637

@ -59,7 +59,7 @@ DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 9
count
-------
2984
2985
(1 row)

-- Single range-repartition join with a selection clause on the partitioned
@ -151,19 +151,19 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 18
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 36
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
QUERY PLAN
@ -173,7 +173,7 @@ DETAIL: Creating dependency on merge taskId 16
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 8
Map Task Count: 2
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 3
@ -199,19 +199,19 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 18
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 36
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
count

@ -45,7 +45,7 @@ DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
count
-------
2984
2985
(1 row)

-- Single range repartition join, along with a join with a small table containing
@ -60,119 +60,21 @@ FROM
WHERE
o_custkey = c_custkey AND
o_orderkey = l_orderkey;
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: join prunable for intervals [1,1509] and [2951,4455]
DEBUG: join prunable for intervals [1,1509] and [4480,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,10560]
DEBUG: join prunable for intervals [1,1509] and [10560,12036]
DEBUG: join prunable for intervals [1,1509] and [12036,13473]
DEBUG: join prunable for intervals [1,1509] and [13473,14947]
DEBUG: join prunable for intervals [1509,4964] and [8997,10560]
DEBUG: join prunable for intervals [1509,4964] and [10560,12036]
DEBUG: join prunable for intervals [1509,4964] and [12036,13473]
DEBUG: join prunable for intervals [1509,4964] and [13473,14947]
DEBUG: join prunable for intervals [2951,4455] and [1,1509]
DEBUG: join prunable for intervals [2951,4455] and [4480,5986]
DEBUG: join prunable for intervals [2951,4455] and [8997,10560]
DEBUG: join prunable for intervals [2951,4455] and [10560,12036]
DEBUG: join prunable for intervals [2951,4455] and [12036,13473]
DEBUG: join prunable for intervals [2951,4455] and [13473,14947]
DEBUG: join prunable for intervals [4480,5986] and [1,1509]
DEBUG: join prunable for intervals [4480,5986] and [2951,4455]
DEBUG: join prunable for intervals [4480,5986] and [8997,10560]
DEBUG: join prunable for intervals [4480,5986] and [10560,12036]
DEBUG: join prunable for intervals [4480,5986] and [12036,13473]
DEBUG: join prunable for intervals [4480,5986] and [13473,14947]
DEBUG: join prunable for intervals [8997,10560] and [1,1509]
DEBUG: join prunable for intervals [8997,10560] and [1509,4964]
DEBUG: join prunable for intervals [8997,10560] and [2951,4455]
DEBUG: join prunable for intervals [8997,10560] and [4480,5986]
DEBUG: join prunable for intervals [8997,10560] and [12036,13473]
DEBUG: join prunable for intervals [8997,10560] and [13473,14947]
DEBUG: join prunable for intervals [10560,12036] and [1,1509]
DEBUG: join prunable for intervals [10560,12036] and [1509,4964]
DEBUG: join prunable for intervals [10560,12036] and [2951,4455]
DEBUG: join prunable for intervals [10560,12036] and [4480,5986]
DEBUG: join prunable for intervals [10560,12036] and [13473,14947]
DEBUG: join prunable for intervals [12036,13473] and [1,1509]
DEBUG: join prunable for intervals [12036,13473] and [1509,4964]
DEBUG: join prunable for intervals [12036,13473] and [2951,4455]
DEBUG: join prunable for intervals [12036,13473] and [4480,5986]
DEBUG: join prunable for intervals [12036,13473] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [1,1509]
DEBUG: join prunable for intervals [13473,14947] and [1509,4964]
DEBUG: join prunable for intervals [13473,14947] and [2951,4455]
DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 9
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 21
DEBUG: pruning merge fetch taskId 15
DETAIL: Creating dependency on merge taskId 21
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 28
DEBUG: pruning merge fetch taskId 19
DETAIL: Creating dependency on merge taskId 28
DEBUG: pruning merge fetch taskId 21
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 23
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 25
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 27
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 29
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 31
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 33
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 35
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 37
DETAIL: Creating dependency on merge taskId 56
DEBUG: pruning merge fetch taskId 39
DETAIL: Creating dependency on merge taskId 56
DEBUG: assigned task 14 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 32 to node localhost:57637
DEBUG: assigned task 22 to node localhost:57638
DEBUG: assigned task 26 to node localhost:57637
DEBUG: assigned task 38 to node localhost:57638
DEBUG: propagating assignment from merge task 7 to constrained sql task 4
DEBUG: propagating assignment from merge task 14 to constrained sql task 8
DEBUG: propagating assignment from merge task 14 to constrained sql task 10
DEBUG: propagating assignment from merge task 14 to constrained sql task 12
DEBUG: propagating assignment from merge task 21 to constrained sql task 16
DEBUG: propagating assignment from merge task 28 to constrained sql task 20
DEBUG: propagating assignment from merge task 35 to constrained sql task 24
DEBUG: propagating assignment from merge task 42 to constrained sql task 28
DEBUG: propagating assignment from merge task 42 to constrained sql task 30
DEBUG: propagating assignment from merge task 49 to constrained sql task 34
DEBUG: propagating assignment from merge task 49 to constrained sql task 36
DEBUG: propagating assignment from merge task 56 to constrained sql task 40
count
-------
11998
12000
(1 row)

SET citus.large_table_shard_count TO 2;
@ -186,12 +88,6 @@ WHERE
l_partkey = c_nationkey;
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
@ -208,25 +104,25 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 18
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 36
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
count
-------
125

@ -54,7 +54,7 @@ DEBUG: assigned task 2 to node localhost:57637
DEBUG: CommitTransactionCommand
count
-------
2984
2985
(1 row)

-- Single range repartition join, along with a join with a small table containing
@ -73,120 +73,22 @@ WHERE
o_custkey = c_custkey AND
o_orderkey = l_orderkey;
DEBUG: StartTransactionCommand
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: join prunable for intervals [1,1509] and [2951,4455]
DEBUG: join prunable for intervals [1,1509] and [4480,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,10560]
DEBUG: join prunable for intervals [1,1509] and [10560,12036]
DEBUG: join prunable for intervals [1,1509] and [12036,13473]
DEBUG: join prunable for intervals [1,1509] and [13473,14947]
DEBUG: join prunable for intervals [1509,4964] and [8997,10560]
DEBUG: join prunable for intervals [1509,4964] and [10560,12036]
DEBUG: join prunable for intervals [1509,4964] and [12036,13473]
DEBUG: join prunable for intervals [1509,4964] and [13473,14947]
DEBUG: join prunable for intervals [2951,4455] and [1,1509]
DEBUG: join prunable for intervals [2951,4455] and [4480,5986]
DEBUG: join prunable for intervals [2951,4455] and [8997,10560]
DEBUG: join prunable for intervals [2951,4455] and [10560,12036]
DEBUG: join prunable for intervals [2951,4455] and [12036,13473]
DEBUG: join prunable for intervals [2951,4455] and [13473,14947]
DEBUG: join prunable for intervals [4480,5986] and [1,1509]
DEBUG: join prunable for intervals [4480,5986] and [2951,4455]
DEBUG: join prunable for intervals [4480,5986] and [8997,10560]
DEBUG: join prunable for intervals [4480,5986] and [10560,12036]
DEBUG: join prunable for intervals [4480,5986] and [12036,13473]
DEBUG: join prunable for intervals [4480,5986] and [13473,14947]
DEBUG: join prunable for intervals [8997,10560] and [1,1509]
DEBUG: join prunable for intervals [8997,10560] and [1509,4964]
DEBUG: join prunable for intervals [8997,10560] and [2951,4455]
DEBUG: join prunable for intervals [8997,10560] and [4480,5986]
DEBUG: join prunable for intervals [8997,10560] and [12036,13473]
DEBUG: join prunable for intervals [8997,10560] and [13473,14947]
DEBUG: join prunable for intervals [10560,12036] and [1,1509]
DEBUG: join prunable for intervals [10560,12036] and [1509,4964]
DEBUG: join prunable for intervals [10560,12036] and [2951,4455]
DEBUG: join prunable for intervals [10560,12036] and [4480,5986]
DEBUG: join prunable for intervals [10560,12036] and [13473,14947]
DEBUG: join prunable for intervals [12036,13473] and [1,1509]
DEBUG: join prunable for intervals [12036,13473] and [1509,4964]
DEBUG: join prunable for intervals [12036,13473] and [2951,4455]
DEBUG: join prunable for intervals [12036,13473] and [4480,5986]
DEBUG: join prunable for intervals [12036,13473] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [1,1509]
DEBUG: join prunable for intervals [13473,14947] and [1509,4964]
DEBUG: join prunable for intervals [13473,14947] and [2951,4455]
DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 9
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 21
DEBUG: pruning merge fetch taskId 15
DETAIL: Creating dependency on merge taskId 21
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 28
DEBUG: pruning merge fetch taskId 19
DETAIL: Creating dependency on merge taskId 28
DEBUG: pruning merge fetch taskId 21
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 23
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 25
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 27
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 29
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 31
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 33
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 35
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 37
DETAIL: Creating dependency on merge taskId 56
DEBUG: pruning merge fetch taskId 39
DETAIL: Creating dependency on merge taskId 56
DEBUG: assigned task 14 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 32 to node localhost:57637
DEBUG: assigned task 22 to node localhost:57638
DEBUG: assigned task 26 to node localhost:57637
DEBUG: assigned task 38 to node localhost:57638
DEBUG: propagating assignment from merge task 7 to constrained sql task 4
DEBUG: propagating assignment from merge task 14 to constrained sql task 8
DEBUG: propagating assignment from merge task 14 to constrained sql task 10
DEBUG: propagating assignment from merge task 14 to constrained sql task 12
DEBUG: propagating assignment from merge task 21 to constrained sql task 16
DEBUG: propagating assignment from merge task 28 to constrained sql task 20
DEBUG: propagating assignment from merge task 35 to constrained sql task 24
DEBUG: propagating assignment from merge task 42 to constrained sql task 28
DEBUG: propagating assignment from merge task 42 to constrained sql task 30
DEBUG: propagating assignment from merge task 49 to constrained sql task 34
DEBUG: propagating assignment from merge task 49 to constrained sql task 36
DEBUG: propagating assignment from merge task 56 to constrained sql task 40
DEBUG: CommitTransactionCommand
count
-------
11998
12000
(1 row)

SET citus.large_table_shard_count TO 2;
@ -204,12 +106,6 @@ WHERE
DEBUG: StartTransactionCommand
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
@ -226,25 +122,25 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 18
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 36
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: CommitTransactionCommand
count
-------

@ -40,7 +40,7 @@ DEBUG: push down of limit count: 600
199283 | 2726988572
185925 | 2672114100
196629 | 2622637602
157064 | 2614644408
149926 | 2606013732
(10 rows)

-- Disable limit optimization for our second test. This time, we have a query
@ -79,11 +79,11 @@ DEBUG: push down of limit count: 150
c_custkey | c_name | lineitem_count
-----------+--------------------+----------------
43 | Customer#000000043 | 42
370 | Customer#000000370 | 38
79 | Customer#000000079 | 37
689 | Customer#000000689 | 36
472 | Customer#000000472 | 35
685 | Customer#000000685 | 35
370 | Customer#000000370 | 40
79 | Customer#000000079 | 38
689 | Customer#000000689 | 38
685 | Customer#000000685 | 37
472 | Customer#000000472 | 36
643 | Customer#000000643 | 34
226 | Customer#000000226 | 33
496 | Customer#000000496 | 32

@ -7,7 +7,7 @@ SELECT part_storage_type, part_key, part_replica_count, part_max_size,
part_placement_policy FROM master_get_table_metadata('lineitem');
part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy
-------------------+------------+--------------------+---------------+-----------------------
t | l_orderkey | 2 | 307200 | 2
t | l_orderkey | 2 | 1536000 | 2
(1 row)

SELECT * FROM master_get_table_ddl_events('lineitem');

@ -906,6 +906,8 @@ DROP TRIGGER reject_bad_reference ON reference_modifying_xacts_1200006;
-- now create a hash distributed table and run tests
-- including both the reference table and the hash
-- distributed table
-- To prevent colocating a hash table with append table
DELETE FROM pg_dist_colocation WHERE colocationid = 100001;
SET citus.next_shard_id TO 1200007;
SET citus.shard_count = 4;
SET citus.shard_replication_factor = 1;

@ -15,7 +15,7 @@ CREATE OPERATOR = (
LEFTARG = test_udt,
RIGHTARG = test_udt,
PROCEDURE = equal_test_udt_function,
COMMUTATOR = =,
COMMUTATOR = =,
HASHES
);
-- ... and create a custom operator family for hash indexes...
@ -38,14 +38,14 @@ OPERATOR 1 = (test_udt, test_udt),
FUNCTION 1 test_udt_hash(test_udt);
-- END type creation
CREATE TABLE repartition_udt (
pk integer not null,
udtcol test_udt,
txtcol text
pk integer not null,
udtcol test_udt,
txtcol text
);
CREATE TABLE repartition_udt_other (
pk integer not null,
udtcol test_udt,
txtcol text
pk integer not null,
udtcol test_udt,
txtcol text
);
-- Connect directly to a worker, create and drop the type, then
-- proceed with type creation as above; thus the OIDs will be different.
@ -66,7 +66,7 @@ CREATE OPERATOR = (
LEFTARG = test_udt,
RIGHTARG = test_udt,
PROCEDURE = equal_test_udt_function,
COMMUTATOR = =,
COMMUTATOR = =,
HASHES
);
-- ... and create a custom operator family for hash indexes...
@ -102,7 +102,7 @@ CREATE OPERATOR = (
LEFTARG = test_udt,
RIGHTARG = test_udt,
PROCEDURE = equal_test_udt_function,
COMMUTATOR = =,
COMMUTATOR = =,
HASHES
);
-- ... and create a custom operator family for hash indexes...
@ -161,7 +161,7 @@ SET client_min_messages = LOG;
-- manually make the query router plannable.
SELECT * FROM repartition_udt JOIN repartition_udt_other
ON repartition_udt.pk = repartition_udt_other.pk
WHERE repartition_udt.pk = 1;
WHERE repartition_udt.pk = 1;
pk | udtcol | txtcol | pk | udtcol | txtcol
----+--------+--------+----+--------+--------
(0 rows)
@ -172,7 +172,7 @@ SET citus.task_executor_type = 'task-tracker';
SET citus.log_multi_join_order = true;
EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other
ON repartition_udt.udtcol = repartition_udt_other.udtcol
WHERE repartition_udt.pk > 1;
WHERE repartition_udt.pk > 1;
LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ]
QUERY PLAN
--------------------------------------------------------------------
@ -189,8 +189,8 @@ LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_ot

SELECT * FROM repartition_udt JOIN repartition_udt_other
ON repartition_udt.udtcol = repartition_udt_other.udtcol
WHERE repartition_udt.pk > 1
ORDER BY repartition_udt.pk;
WHERE repartition_udt.pk > 1
ORDER BY repartition_udt.pk;
LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ]
pk | udtcol | txtcol | pk | udtcol | txtcol
----+--------+--------+----+--------+--------
@ -201,6 +201,6 @@ LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_ot
6 | (2,3) | foo | 12 | (2,3) | foo
(5 rows)

\c - - - :worker_1_port
\c - - - :worker_2_port

@ -19,22 +19,25 @@ SET citus.explain_all_tasks TO on;
SET citus.task_executor_type TO 'real-time';
-- Change configuration to treat lineitem and orders tables as large
SET citus.large_table_shard_count TO 2;
SET citus.log_multi_join_order to true;
SET citus.enable_repartition_joins to ON;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
shardminvalue | shardmaxvalue
---------------+---------------
1 | 1509
1 | 5986
(1 row)

SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
shardminvalue | shardmaxvalue
---------------+---------------
1509 | 2951
8997 | 14947
(1 row)

-- Check that partition and join pruning works when min/max values exist
-- Adding l_orderkey = 1 to make the query not router executable
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
LOG: join order: [ "lineitem" ]
QUERY PLAN
-----------------------------------------------------------------------
Custom Scan (Citus Real-Time)
@ -42,6 +45,15 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Bitmap Heap Scan on lineitem_290001 lineitem
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
-> BitmapOr
-> Bitmap Index Scan on lineitem_pkey_290001
Index Cond: (l_orderkey = 9030)
-> Bitmap Index Scan on lineitem_pkey_290001
Index Cond: (l_orderkey = 1)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Bitmap Heap Scan on lineitem_290000 lineitem
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
-> BitmapOr
@ -49,97 +61,45 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
|
|||
Index Cond: (l_orderkey = 9030)
|
||||
-> Bitmap Index Scan on lineitem_pkey_290000
|
||||
Index Cond: (l_orderkey = 1)
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Bitmap Heap Scan on lineitem_290004 lineitem
|
||||
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
|
||||
-> BitmapOr
|
||||
-> Bitmap Index Scan on lineitem_pkey_290004
|
||||
Index Cond: (l_orderkey = 9030)
|
||||
-> Bitmap Index Scan on lineitem_pkey_290004
|
||||
Index Cond: (l_orderkey = 1)
|
||||
(21 rows)
|
||||
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
|
||||
WHERE l_orderkey = o_orderkey;
|
||||
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
|
||||
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
|
||||
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
|
||||
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
|
||||
QUERY PLAN
|
||||
------------------------------------------------------------------------------------------------------
|
||||
LOG: join order: [ "lineitem" ][ local partition join "orders" ]
|
||||
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
|
||||
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
|
||||
QUERY PLAN
|
||||
--------------------------------------------------------------------------------
|
||||
Aggregate
|
||||
-> Custom Scan (Citus Real-Time)
|
||||
Task Count: 8
|
||||
Task Count: 2
|
||||
Tasks Shown: All
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
|
||||
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
|
||||
-> Hash Join
|
||||
Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Seq Scan on lineitem_290001 lineitem
|
||||
-> Hash
|
||||
-> Seq Scan on orders_290003 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
|
||||
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
|
||||
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
|
||||
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
|
||||
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
|
||||
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
|
||||
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
|
||||
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
|
||||
(60 rows)
|
||||
-> Hash Join
|
||||
Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Seq Scan on lineitem_290000 lineitem
|
||||
-> Hash
|
||||
-> Seq Scan on orders_290002 orders
|
||||
(20 rows)
|
||||
|
||||
-- Now set the minimum value for a shard to null. Then check that we don't apply
|
||||
-- partition or join pruning for the shard with null min value.
|
||||
-- partition or join pruning for the shard with null min value. Since it is not
|
||||
-- supported with single-repartition join, dual-repartition has been used.
|
||||
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
|
||||
LOG: join order: [ "lineitem" ]
|
||||
QUERY PLAN
|
||||
-------------------------------------------------------------------------------
|
||||
Custom Scan (Citus Real-Time)
|
||||
|
@ -147,211 +107,69 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
|
|||
Tasks Shown: All
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
|
||||
-> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
|
||||
Index Cond: (l_orderkey = 9030)
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
|
||||
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
|
||||
Index Cond: (l_orderkey = 9030)
|
||||
(11 rows)
|
||||
|
||||
EXPLAIN (COSTS FALSE)
|
||||
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
|
||||
WHERE l_orderkey = o_orderkey;
|
||||
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
|
||||
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
|
||||
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
|
||||
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
|
||||
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
|
||||
QUERY PLAN
|
||||
------------------------------------------------------------------------------------------------------
|
||||
WHERE l_partkey = o_custkey;
|
||||
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
|
||||
DEBUG: join prunable for task partitionId 0 and 1
|
||||
DEBUG: join prunable for task partitionId 0 and 2
|
||||
DEBUG: join prunable for task partitionId 0 and 3
|
||||
DEBUG: join prunable for task partitionId 1 and 0
|
||||
DEBUG: join prunable for task partitionId 1 and 2
|
||||
DEBUG: join prunable for task partitionId 1 and 3
|
||||
DEBUG: join prunable for task partitionId 2 and 0
|
||||
DEBUG: join prunable for task partitionId 2 and 1
|
||||
DEBUG: join prunable for task partitionId 2 and 3
|
||||
DEBUG: join prunable for task partitionId 3 and 0
|
||||
DEBUG: join prunable for task partitionId 3 and 1
|
||||
DEBUG: join prunable for task partitionId 3 and 2
|
||||
DEBUG: pruning merge fetch taskId 1
|
||||
DETAIL: Creating dependency on merge taskId 3
|
||||
DEBUG: pruning merge fetch taskId 2
|
||||
DETAIL: Creating dependency on merge taskId 3
|
||||
DEBUG: pruning merge fetch taskId 4
|
||||
DETAIL: Creating dependency on merge taskId 6
|
||||
DEBUG: pruning merge fetch taskId 5
|
||||
DETAIL: Creating dependency on merge taskId 6
|
||||
DEBUG: pruning merge fetch taskId 7
|
||||
DETAIL: Creating dependency on merge taskId 9
|
||||
DEBUG: pruning merge fetch taskId 8
|
||||
DETAIL: Creating dependency on merge taskId 9
|
||||
DEBUG: pruning merge fetch taskId 10
|
||||
DETAIL: Creating dependency on merge taskId 12
|
||||
DEBUG: pruning merge fetch taskId 11
|
||||
DETAIL: Creating dependency on merge taskId 12
|
||||
DEBUG: cannot use real time executor with repartition jobs
|
||||
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
|
||||
QUERY PLAN
|
||||
-------------------------------------------------------------------
|
||||
Aggregate
|
||||
-> Custom Scan (Citus Real-Time)
|
||||
Task Count: 9
|
||||
Tasks Shown: All
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
|
||||
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
|
||||
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
|
||||
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
|
||||
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
|
||||
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
|
||||
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
|
||||
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57638 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
|
||||
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
|
||||
-> Task
|
||||
Node: host=localhost port=57637 dbname=regression
|
||||
-> Aggregate
|
||||
-> Merge Join
|
||||
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
|
||||
-> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
|
||||
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
|
||||
(67 rows)
|
||||
-> Custom Scan (Citus Task-Tracker)
|
||||
Task Count: 4
|
||||
Tasks Shown: None, not supported for re-partition queries
|
||||
-> MapMergeJob
|
||||
Map Task Count: 2
|
||||
Merge Task Count: 4
|
||||
-> MapMergeJob
|
||||
Map Task Count: 2
|
||||
Merge Task Count: 4
|
||||
(10 rows)
|
||||
|
||||
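-- Editor's note (illustrative helper, not part of the test): shards whose
-- pruning metadata is incomplete can be listed straight from the catalog,
-- since pruning is skipped for any shard with a NULL min or max value:
--
--   SELECT shardid, shardminvalue, shardmaxvalue
--   FROM pg_dist_shard
--   WHERE logicalrelid = 'lineitem'::regclass
--     AND (shardminvalue IS NULL OR shardmaxvalue IS NULL);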
-- Next, set the maximum value for another shard to null. Then check that we
-- don't apply partition or join pruning for this other shard either.
-- don't apply partition or join pruning for this other shard either. Since it
-- is not supported with single-repartition join, dual-repartition has been used.
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Real-Time)
Task Count: 3
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
Index Cond: (l_orderkey = 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
Index Cond: (l_orderkey = 9030)
(15 rows)

EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
QUERY PLAN
------------------------------------------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Real-Time)
Task Count: 10
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
(74 rows)

-- Last, set the minimum value to 0 and check that we don't treat it as null. We
-- should apply partition and join pruning for this shard now.
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
LOG: join order: [ "lineitem" ]
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Real-Time)

@@ -363,92 +181,126 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
Index Cond: (l_orderkey = 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 9030)
(11 rows)

EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [0,1509] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
QUERY PLAN
------------------------------------------------------------------------------------------------------
WHERE l_partkey = o_custkey;
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
QUERY PLAN
-------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Real-Time)
Task Count: 9
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
(67 rows)
-> Custom Scan (Citus Task-Tracker)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
(10 rows)

-- Last, set the minimum value to 0 and check that we don't treat it as null. We
-- should apply partition and join pruning for this shard now. Since it is not
-- supported with single-repartition join, dual-repartition has been used.
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
LOG: join order: [ "lineitem" ]
DEBUG: Plan is router executable
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Router)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
Index Cond: (l_orderkey = 9030)
(7 rows)

EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
QUERY PLAN
-------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
(10 rows)

-- Set minimum and maximum values for two shards back to their original values
UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000;
UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001;
UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001;
SET client_min_messages TO NOTICE;
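-- Editor's note (illustrative, not part of the test): the restored metadata
-- can be double-checked directly against the catalog:
--
--   SELECT shardid, shardminvalue, shardmaxvalue
--   FROM pg_dist_shard
--   WHERE shardid IN (290000, 290001)
--   ORDER BY shardid;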
@@ -19,22 +19,25 @@ SET citus.explain_all_tasks TO on;
SET citus.task_executor_type TO 'real-time';
-- Change configuration to treat lineitem and orders tables as large
SET citus.large_table_shard_count TO 2;
SET citus.log_multi_join_order to true;
SET citus.enable_repartition_joins to ON;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
shardminvalue | shardmaxvalue
---------------+---------------
1 | 1509
1 | 5986
(1 row)

SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
shardminvalue | shardmaxvalue
---------------+---------------
1509 | 2951
8997 | 14947
(1 row)

-- Check that partition and join pruning works when min/max values exist
-- Adding l_orderkey = 1 to make the query not router executable
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
LOG: join order: [ "lineitem" ]
QUERY PLAN
-----------------------------------------------------------------------
Custom Scan (Citus Real-Time)

@@ -42,6 +45,15 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Bitmap Heap Scan on lineitem_290001 lineitem
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
-> BitmapOr
-> Bitmap Index Scan on lineitem_pkey_290001
Index Cond: (l_orderkey = 9030)
-> Bitmap Index Scan on lineitem_pkey_290001
Index Cond: (l_orderkey = 1)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Bitmap Heap Scan on lineitem_290000 lineitem
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
-> BitmapOr

@@ -49,97 +61,43 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
Index Cond: (l_orderkey = 9030)
-> Bitmap Index Scan on lineitem_pkey_290000
Index Cond: (l_orderkey = 1)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Bitmap Heap Scan on lineitem_290004 lineitem
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
-> BitmapOr
-> Bitmap Index Scan on lineitem_pkey_290004
Index Cond: (l_orderkey = 9030)
-> Bitmap Index Scan on lineitem_pkey_290004
Index Cond: (l_orderkey = 1)
(21 rows)

EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
LOG: join order: [ "lineitem" ][ local partition join "orders" ]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
QUERY PLAN
------------------------------------------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Real-Time)
Task Count: 8
Task Count: 2
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using orders_pkey_290003 on orders_290003 orders
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290002 on orders_290002 orders
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
(60 rows)
(18 rows)

-- Now set the minimum value for a shard to null. Then check that we don't apply
-- partition or join pruning for the shard with null min value.
-- partition or join pruning for the shard with null min value. Since it is not
-- supported with single-repartition join, dual-repartition has been used.
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
LOG: join order: [ "lineitem" ]
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Real-Time)

@@ -147,211 +105,69 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
Index Cond: (l_orderkey = 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 9030)
(11 rows)

EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
QUERY PLAN
------------------------------------------------------------------------------------------------------
WHERE l_partkey = o_custkey;
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
QUERY PLAN
-------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Real-Time)
Task Count: 9
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
(67 rows)
-> Custom Scan (Citus Task-Tracker)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
(10 rows)

-- Next, set the maximum value for another shard to null. Then check that we
-- don't apply partition or join pruning for this other shard either.
-- don't apply partition or join pruning for this other shard either. Since it
-- is not supported with single-repartition join, dual-repartition has been used.
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Real-Time)
Task Count: 3
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
Index Cond: (l_orderkey = 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
Index Cond: (l_orderkey = 9030)
(15 rows)

EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
QUERY PLAN
------------------------------------------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Real-Time)
Task Count: 10
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
(74 rows)

-- Last, set the minimum value to 0 and check that we don't treat it as null. We
-- should apply partition and join pruning for this shard now.
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
LOG: join order: [ "lineitem" ]
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Real-Time)

@@ -363,92 +179,126 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
Index Cond: (l_orderkey = 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 9030)
(11 rows)

EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [0,1509] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
QUERY PLAN
------------------------------------------------------------------------------------------------------
WHERE l_partkey = o_custkey;
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
QUERY PLAN
-------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Real-Time)
Task Count: 9
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
-> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
(67 rows)
-> Custom Scan (Citus Task-Tracker)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
(10 rows)

-- Last, set the minimum value to 0 and check that we don't treat it as null. We
-- should apply partition and join pruning for this shard now. Since it is not
-- supported with single-repartition join, dual-repartition has been used.
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
LOG: join order: [ "lineitem" ]
DEBUG: Plan is router executable
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Router)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
Index Cond: (l_orderkey = 9030)
(7 rows)

EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
QUERY PLAN
-------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
(10 rows)

-- Set minimum and maximum values for two shards back to their original values
UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000;
UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001;
UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001;
SET client_min_messages TO NOTICE;
|
||||
|
|
|
@ -5,7 +5,7 @@
-- need to increase the logging verbosity of messages displayed on the client.
SET citus.next_shard_id TO 770000;
-- Adding additional l_orderkey = 1 to make this query not router executable
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1 ORDER BY 1,2;
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
1 | 1 | 03-13-1996

@ -139,25 +139,25 @@ SET client_min_messages TO INFO;
SELECT plpgsql_test_1();
plpgsql_test_1
----------------
2984
2985
(1 row)

SELECT plpgsql_test_2();
plpgsql_test_2
----------------
11998
12000
(1 row)

SELECT plpgsql_test_3();
plpgsql_test_3
----------------
1955
1956
(1 row)

SELECT plpgsql_test_4();
plpgsql_test_4
----------------
7804
7806
(1 row)

SELECT plpgsql_test_5();
@ -170,13 +170,13 @@ SELECT plpgsql_test_5();
SELECT plpgsql_test_6(155);
plpgsql_test_6
----------------
11811
11813
(1 row)

SELECT plpgsql_test_6(1555);
plpgsql_test_6
----------------
10183
10185
(1 row)

SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA');
@ -195,13 +195,13 @@ SELECT plpgsql_test_7('FRANCE', 'GERMANY');
SELECT plpgsql_test_6(155);
plpgsql_test_6
----------------
11811
11813
(1 row)

SELECT plpgsql_test_3();
plpgsql_test_3
----------------
1955
1956
(1 row)

SELECT plpgsql_test_7('FRANCE', 'GERMANY');
@ -219,19 +219,19 @@ SELECT plpgsql_test_5();
SELECT plpgsql_test_1();
plpgsql_test_1
----------------
2984
2985
(1 row)

SELECT plpgsql_test_6(1555);
plpgsql_test_6
----------------
10183
10185
(1 row)

SELECT plpgsql_test_4();
plpgsql_test_4
----------------
7804
7806
(1 row)

SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA');
@ -243,7 +243,7 @@ SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA');
SELECT plpgsql_test_2();
plpgsql_test_2
----------------
11998
12000
(1 row)

-- run the tests which do not require re-partition
@ -253,26 +253,26 @@ SET citus.task_executor_type TO 'real-time';
SELECT plpgsql_test_1();
plpgsql_test_1
----------------
2984
2985
(1 row)

SELECT plpgsql_test_2();
plpgsql_test_2
----------------
11998
12000
(1 row)

-- run PL/pgsql functions with different parameters
SELECT plpgsql_test_6(155);
plpgsql_test_6
----------------
11811
11813
(1 row)

SELECT plpgsql_test_6(1555);
plpgsql_test_6
----------------
10183
10185
(1 row)

-- test router executor parameterized PL/pgsql functions

@ -101,25 +101,25 @@ SET client_min_messages TO INFO;
EXECUTE prepared_test_1;
count
-------
2984
2985
(1 row)

EXECUTE prepared_test_2;
count
-------
11998
12000
(1 row)

EXECUTE prepared_test_3;
count
-------
1955
1956
(1 row)

EXECUTE prepared_test_4;
count
-------
7804
7806
(1 row)

EXECUTE prepared_test_5;
@ -132,13 +132,13 @@ EXECUTE prepared_test_5;
EXECUTE prepared_test_6(155);
count
-------
11811
11813
(1 row)

EXECUTE prepared_test_6(1555);
count
-------
10183
10185
(1 row)

EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA');
@ -157,13 +157,13 @@ EXECUTE prepared_test_7('FRANCE', 'GERMANY');
EXECUTE prepared_test_6(155);
count
-------
11811
11813
(1 row)

EXECUTE prepared_test_3;
count
-------
1955
1956
(1 row)

EXECUTE prepared_test_7('FRANCE', 'GERMANY');
@ -181,19 +181,19 @@ EXECUTE prepared_test_5;
EXECUTE prepared_test_1;
count
-------
2984
2985
(1 row)

EXECUTE prepared_test_6(1555);
count
-------
10183
10185
(1 row)

EXECUTE prepared_test_4;
count
-------
7804
7806
(1 row)

EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA');
@ -205,7 +205,7 @@ EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA');
EXECUTE prepared_test_2;
count
-------
11998
12000
(1 row)

-- CREATE TABLE ... AS EXECUTE prepared_statement tests
@ -222,26 +222,26 @@ SET citus.task_executor_type TO 'real-time';
EXECUTE prepared_test_1;
count
-------
2984
2985
(1 row)

EXECUTE prepared_test_2;
count
-------
11998
12000
(1 row)

-- execute prepared statements with different parameters
EXECUTE prepared_test_6(155);
count
-------
11811
11813
(1 row)

EXECUTE prepared_test_6(1555);
count
-------
10183
10185
(1 row)

-- test router executor with parameterized non-partition columns

@ -1419,7 +1419,7 @@ SELECT part_storage_type, part_key, part_replica_count, part_max_size,
FROM master_get_table_metadata('reference_schema.reference_table_ddl');
part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy
-------------------+----------+--------------------+---------------+-----------------------
t | | 2 | 307200 | 2
t | | 2 | 1536000 | 2
(1 row)

SELECT shardid AS a_shard_id FROM pg_dist_shard WHERE logicalrelid = 'reference_schema.reference_table_ddl'::regclass \gset

@ -35,7 +35,7 @@ SELECT master_create_empty_shard('test_schema_support.nation_append');
SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port);
master_append_table_to_shard
------------------------------
0.0266667
0.00533333
(1 row)

-- verify table actually appended to shard
@ -66,7 +66,7 @@ SELECT master_create_empty_shard('test_schema_support."nation._''append"');
SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port);
master_append_table_to_shard
------------------------------
0.0266667
0.00533333
(1 row)

-- verify table actually appended to shard
@ -81,7 +81,7 @@ SET search_path TO test_schema_support;
SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port);
master_append_table_to_shard
------------------------------
0.0266667
0.00533333
(1 row)

-- verify table actually appended to shard
@ -95,7 +95,7 @@ SELECT COUNT(*) FROM nation_append;
SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port);
master_append_table_to_shard
------------------------------
0.0266667
0.00533333
(1 row)

-- verify table actually appended to shard
@ -806,6 +806,8 @@ SELECT master_apply_delete_command('DELETE FROM nation_append') ;
-- create necessary objects and load data to them
CREATE SCHEMA test_schema_support_join_1;
CREATE SCHEMA test_schema_support_join_2;
SET citus.shard_count to 4;
SET citus.shard_replication_factor to 1;
CREATE TABLE test_schema_support_join_1.nation_hash (
n_nationkey integer not null,
n_name char(25) not null,
@ -821,41 +823,23 @@ CREATE TABLE test_schema_support_join_2.nation_hash (
n_name char(25) not null,
n_regionkey integer not null,
n_comment varchar(152));
SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey', 'hash');
master_create_distributed_table
---------------------------------

(1 row)

SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey');
create_distributed_table
--------------------------

(1 row)

\copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|';
SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey', 'hash');
master_create_distributed_table
---------------------------------

(1 row)

SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey');
create_distributed_table
--------------------------

(1 row)

\copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|';
SELECT master_create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey', 'hash');
master_create_distributed_table
---------------------------------

(1 row)

SELECT master_create_worker_shards('test_schema_support_join_2.nation_hash', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey');
create_distributed_table
--------------------------

(1 row)

@ -1109,3 +1093,9 @@ SELECT sum(result::int) FROM run_command_on_shards('run_test_schema.test_table',
-- Clean up the created schema
DROP SCHEMA run_test_schema CASCADE;
NOTICE: drop cascades to table run_test_schema.test_table
DROP SCHEMA test_schema_support_join_1 CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table test_schema_support_join_1.nation_hash
drop cascades to table test_schema_support_join_1.nation_hash_2
DROP SCHEMA test_schema_support_join_2 CASCADE;
NOTICE: drop cascades to table test_schema_support_join_2.nation_hash

@ -185,7 +185,7 @@ SELECT master_create_empty_shard('test_append_table');
SELECT * FROM master_append_table_to_shard(1440010, 'append_stage_table', 'localhost', :master_port);
master_append_table_to_shard
------------------------------
0.0266667
0.00533333
(1 row)

SELECT master_create_empty_shard('test_append_table') AS new_shard_id;
@ -197,7 +197,7 @@ SELECT master_create_empty_shard('test_append_table') AS new_shard_id;
SELECT * FROM master_append_table_to_shard(1440011, 'append_stage_table', 'localhost', :master_port);
master_append_table_to_shard
------------------------------
0.0266667
0.00533333
(1 row)

UPDATE test_append_table SET col_2 = 5;

@ -391,7 +391,7 @@ SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders
ORDER BY o_orderstatus;
o_orderstatus | count | avg
---------------+-------+---------------------
O | 1460 | 143355.847013698630
O | 1461 | 143326.447029431896
P | 75 | 164847.914533333333
(2 rows)

@ -403,7 +403,7 @@ SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
o_orderstatus | sum | avg
---------------+------+--------------------
F | 8559 | 3.0126715945089757
O | 8901 | 3.0050641458474004
O | 8904 | 3.0040485829959514
(2 rows)

-- now, test the cases where Citus do or do not need to create

@ -335,7 +335,7 @@ SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders
ORDER BY o_orderstatus;
o_orderstatus | count | avg
---------------+-------+---------------------
O | 1460 | 143355.847013698630
O | 1461 | 143326.447029431896
P | 75 | 164847.914533333333
(2 rows)

@ -347,7 +347,7 @@ SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
o_orderstatus | sum | avg
---------------+------+--------------------
F | 8559 | 3.0126715945089757
O | 8901 | 3.0050641458474004
O | 8904 | 3.0040485829959514
(2 rows)

-- now, test the cases where Citus do or do not need to create

@ -39,25 +39,25 @@ SET client_min_messages TO INFO;
SELECT sql_test_no_1();
sql_test_no_1
---------------
2984
2985
(1 row)

SELECT sql_test_no_2();
sql_test_no_2
---------------
11998
12000
(1 row)

SELECT sql_test_no_3();
sql_test_no_3
---------------
1955
1956
(1 row)

SELECT sql_test_no_4();
sql_test_no_4
---------------
7804
7806
(1 row)

-- run the tests which do not require re-partition
@ -67,13 +67,13 @@ SET citus.task_executor_type TO 'real-time';
SELECT sql_test_no_1();
sql_test_no_1
---------------
2984
2985
(1 row)

SELECT sql_test_no_2();
sql_test_no_2
---------------
11998
12000
(1 row)

-- test router executor parameterized sql functions

@ -17,8 +17,11 @@ FROM
l_orderkey = o_orderkey
GROUP BY
l_orderkey) AS unit_prices;
ERROR: cannot push down this subquery
DETAIL: Shards of relations in subquery need to have 1-to-1 shard partitioning
avg
-------------------------
142158.8766934673366834
(1 row)

-- Update metadata in order to make all shards equal
-- note that the table is created on multi_insert_select_create_table.sql
UPDATE
@ -271,6 +274,8 @@ ORDER BY l_orderkey DESC
LIMIT 10;
l_orderkey
------------
14947
14947
14946
14946
14945
@ -279,8 +284,6 @@ LIMIT 10;
14945
14945
14945
14944
14944
(10 rows)

-- non-implicit typecast is not supported in equi-join
@ -672,7 +675,7 @@ WHERE
unit_price < 10000;
avg
-----------------------
4968.2889885208475549
4968.4946466804019323
(1 row)

-- Check that if subquery is pulled, we don't error and run query properly.

@ -14,7 +14,7 @@ SELECT count(*) FROM lineitem_hash_part;
SELECT count(*) FROM orders_hash_part;
count
-------
2984
2985
(1 row)

-- create a view for priority orders
@ -24,14 +24,14 @@ SELECT o_orderpriority, count(*) FROM priority_orders GROUP BY 1 ORDER BY 2, 1;
o_orderpriority | count
-----------------+-------
2-HIGH | 593
1-URGENT | 603
1-URGENT | 604
(2 rows)

SELECT o_orderpriority, count(*) FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM' GROUP BY 1 ORDER BY 2,1;
o_orderpriority | count
-----------------+-------
2-HIGH | 593
1-URGENT | 603
1-URGENT | 604
(2 rows)

-- filters
@ -39,7 +39,7 @@ SELECT o_orderpriority, count(*) as all, count(*) FILTER (WHERE o_orderstatus ='
o_orderpriority | all | fullfilled
-----------------+-----+------------
2-HIGH | 593 | 271
1-URGENT | 603 | 280
1-URGENT | 604 | 280
(2 rows)

-- having

@ -380,7 +380,7 @@ ALTER TABLE lineitem_renamed RENAME TO lineitem_alter;

-- show rename worked on one worker, too
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220009' /* failed copy trails */ ORDER BY relname;
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
\c - - - :master_port

-- verify that we can set and reset storage parameters
@ -388,14 +388,14 @@ ALTER TABLE lineitem_alter SET(fillfactor=40);
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';

\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220009' /* failed copy trails */ ORDER BY relname;
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
\c - - - :master_port

ALTER TABLE lineitem_alter RESET(fillfactor);
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';

\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220009' /* failed copy trails */ ORDER BY relname;
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
\c - - - :master_port

-- verify that we can rename indexes on distributed tables
@ -494,7 +494,7 @@ CREATE FUNCTION update_value() RETURNS trigger AS $up$
$up$ LANGUAGE plpgsql;

CREATE TRIGGER update_value
BEFORE INSERT ON trigger_table_220028
BEFORE INSERT ON trigger_table_220017
FOR EACH ROW EXECUTE PROCEDURE update_value();

\c - - - :master_port

@ -354,7 +354,7 @@ WHERE shardid = :new_shard_id;

SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;

SET citus.shard_max_size TO "1MB";

@ -163,14 +163,13 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left');
SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right');

-- reload shards with 1-1 matching
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-subset-11-20.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'

\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-subset-21-30.data' with delimiter '|'

-- multi_outer_join_third is a single shard table

-- Regular left join should work as expected
SELECT
min(l_custkey), max(l_custkey)
@ -438,8 +437,10 @@ FROM
-- simple test to ensure anti-joins work with hash-partitioned tables
CREATE TABLE left_values(val int);

SELECT master_create_distributed_table('left_values', 'val', 'hash');
SELECT master_create_worker_shards('left_values', 16, 1);
SET citus.shard_count to 16;
SET citus.shard_replication_factor to 1;

SELECT create_distributed_table('left_values', 'val');

\copy left_values from stdin
1
@ -451,8 +452,7 @@ SELECT master_create_worker_shards('left_values', 16, 1);

CREATE TABLE right_values(val int);

SELECT master_create_distributed_table('right_values', 'val', 'hash');
SELECT master_create_worker_shards('right_values', 16, 1);
SELECT create_distributed_table('right_values', 'val');

\copy right_values from stdin
2

@ -108,7 +108,7 @@ SELECT p_mfgr, count(distinct p_partkey) FROM part GROUP BY p_mfgr ORDER BY p_mf
SELECT count(distinct o_orderkey), count(distinct o_custkey) FROM orders;
count | count
-------+-------
2984 | 923
2985 | 923
(1 row)

-- Hash partitioned tables:

@ -43,10 +43,7 @@ SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' OR
relname | reloptions
-----------------------+-----------------
lineitem_alter_220000 | {fillfactor=80}
lineitem_alter_220001 | {fillfactor=80}
lineitem_alter_220002 | {fillfactor=80}
lineitem_alter_220003 | {fillfactor=80}
(4 rows)
(1 row)

\c - - - :master_port
-- Verify that we can add columns
@ -393,7 +390,7 @@ LINE 1: ALTER TABLE lineitem_alter ADD COLUMN new_column non_existen...
^
ALTER TABLE lineitem_alter ALTER COLUMN null_column SET NOT NULL;
ERROR: column "null_column" contains null values
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
ALTER TABLE lineitem_alter ALTER COLUMN l_partkey SET DEFAULT 'a';
ERROR: invalid input syntax for integer: "a"
-- Verify that we error out on RENAME CONSTRAINT statement
@ -750,7 +747,7 @@ SELECT master_create_worker_shards('test_ab', 8, 2);
INSERT INTO test_ab VALUES (2, 10);
INSERT INTO test_ab VALUES (2, 11);
CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a);
ERROR: could not create unique index "temp_unique_index_1_220022"
ERROR: could not create unique index "temp_unique_index_1_220011"
DETAIL: Key (a)=(2) is duplicated.
CONTEXT: while executing command on localhost:57638
SELECT shardid FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard
@ -823,43 +820,21 @@ SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_renamed%' ORDER BY re
-------------------------
lineitem_renamed_220000
lineitem_renamed_220001
lineitem_renamed_220002
lineitem_renamed_220003
lineitem_renamed_220004
lineitem_renamed_220005
lineitem_renamed_220006
lineitem_renamed_220007
lineitem_renamed_220008
lineitem_renamed_220010
lineitem_renamed_220011
lineitem_renamed_220012
lineitem_renamed_220013
lineitem_renamed_220014
(14 rows)
(3 rows)

\c - - - :master_port
-- revert it to original name
ALTER TABLE lineitem_renamed RENAME TO lineitem_alter;
-- show rename worked on one worker, too
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220009' /* failed copy trails */ ORDER BY relname;
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
relname
-----------------------
lineitem_alter_220000
lineitem_alter_220001
lineitem_alter_220002
lineitem_alter_220003
lineitem_alter_220004
lineitem_alter_220005
lineitem_alter_220006
lineitem_alter_220007
lineitem_alter_220008
lineitem_alter_220010
lineitem_alter_220011
lineitem_alter_220012
lineitem_alter_220013
lineitem_alter_220014
(14 rows)
(3 rows)

\c - - - :master_port
-- verify that we can set and reset storage parameters
@ -871,24 +846,13 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
(1 row)

\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220009' /* failed copy trails */ ORDER BY relname;
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
relname | reloptions
-----------------------+-----------------
lineitem_alter_220000 | {fillfactor=40}
lineitem_alter_220001 | {fillfactor=40}
lineitem_alter_220002 | {fillfactor=40}
lineitem_alter_220003 | {fillfactor=40}
lineitem_alter_220004 | {fillfactor=40}
lineitem_alter_220005 | {fillfactor=40}
lineitem_alter_220006 | {fillfactor=40}
lineitem_alter_220007 | {fillfactor=40}
lineitem_alter_220008 | {fillfactor=40}
lineitem_alter_220010 | {fillfactor=40}
lineitem_alter_220011 | {fillfactor=40}
lineitem_alter_220012 | {fillfactor=40}
lineitem_alter_220013 | {fillfactor=40}
lineitem_alter_220014 | {fillfactor=40}
(14 rows)
(3 rows)

\c - - - :master_port
ALTER TABLE lineitem_alter RESET(fillfactor);
@ -899,24 +863,13 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
(1 row)

\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220009' /* failed copy trails */ ORDER BY relname;
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
relname | reloptions
-----------------------+------------
lineitem_alter_220000 |
lineitem_alter_220001 |
lineitem_alter_220002 |
lineitem_alter_220003 |
lineitem_alter_220004 |
lineitem_alter_220005 |
lineitem_alter_220006 |
lineitem_alter_220007 |
lineitem_alter_220008 |
lineitem_alter_220010 |
lineitem_alter_220011 |
lineitem_alter_220012 |
lineitem_alter_220013 |
lineitem_alter_220014 |
(14 rows)
(3 rows)

\c - - - :master_port
-- verify that we can rename indexes on distributed tables
@ -936,19 +889,8 @@ SELECT relname FROM pg_class WHERE relname LIKE 'idx_lineitem_linenumber%' ORDER
--------------------------------
idx_lineitem_linenumber_220000
idx_lineitem_linenumber_220001
idx_lineitem_linenumber_220002
idx_lineitem_linenumber_220003
idx_lineitem_linenumber_220004
idx_lineitem_linenumber_220005
idx_lineitem_linenumber_220006
idx_lineitem_linenumber_220007
idx_lineitem_linenumber_220008
idx_lineitem_linenumber_220010
idx_lineitem_linenumber_220011
idx_lineitem_linenumber_220012
idx_lineitem_linenumber_220013
idx_lineitem_linenumber_220014
(14 rows)
(3 rows)

\c - - - :master_port
-- now get rid of the index
@ -1052,7 +994,7 @@ CREATE FUNCTION update_value() RETURNS trigger AS $up$
END;
$up$ LANGUAGE plpgsql;
CREATE TRIGGER update_value
BEFORE INSERT ON trigger_table_220028
BEFORE INSERT ON trigger_table_220017
FOR EACH ROW EXECUTE PROCEDURE update_value();
\c - - - :master_port
INSERT INTO trigger_table VALUES (1, 'trigger disabled');
@ -1096,7 +1038,7 @@ SET citus.enable_ddl_propagation to true;
SELECT master_apply_delete_command('DELETE FROM lineitem_alter');
master_apply_delete_command
-----------------------------
14
3
(1 row)

DROP TABLE lineitem_alter;
@ -1106,7 +1048,7 @@ DROP TABLE lineitem_alter;
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
relname
-----------------------
lineitem_alter_220009
lineitem_alter_220002
(1 row)

\c - - - :master_port
@ -1149,10 +1091,10 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist';
SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist%' ORDER BY relname;
relname | reloptions
------------------+-----------------
hash_dist_220033 | {fillfactor=40}
hash_dist_220034 | {fillfactor=40}
hash_dist_220035 | {fillfactor=40}
hash_dist_220036 | {fillfactor=40}
hash_dist_220022 | {fillfactor=40}
hash_dist_220023 | {fillfactor=40}
hash_dist_220024 | {fillfactor=40}
hash_dist_220025 | {fillfactor=40}
(4 rows)

\c - - - :master_port
@ -1168,10 +1110,10 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname;
relname | reloptions
-----------------------+-----------------
hash_dist_pkey_220033 | {fillfactor=40}
hash_dist_pkey_220034 | {fillfactor=40}
hash_dist_pkey_220035 | {fillfactor=40}
hash_dist_pkey_220036 | {fillfactor=40}
hash_dist_pkey_220022 | {fillfactor=40}
hash_dist_pkey_220023 | {fillfactor=40}
hash_dist_pkey_220024 | {fillfactor=40}
hash_dist_pkey_220025 | {fillfactor=40}
(4 rows)

\c - - - :master_port
@ -1186,10 +1128,10 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname;
relname | reloptions
-----------------------+------------
hash_dist_pkey_220033 |
hash_dist_pkey_220034 |
hash_dist_pkey_220035 |
hash_dist_pkey_220036 |
hash_dist_pkey_220022 |
hash_dist_pkey_220023 |
hash_dist_pkey_220024 |
hash_dist_pkey_220025 |
(4 rows)

\c - - - :master_port
@ -1210,10 +1152,10 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index';
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index%' ORDER BY relname;
relname | reloptions
----------------------+-----------------
another_index_220033 | {fillfactor=50}
another_index_220034 | {fillfactor=50}
another_index_220035 | {fillfactor=50}
another_index_220036 | {fillfactor=50}
another_index_220022 | {fillfactor=50}
another_index_220023 | {fillfactor=50}
another_index_220024 | {fillfactor=50}
another_index_220025 | {fillfactor=50}
(4 rows)

\c - - - :master_port

@ -157,7 +157,7 @@ FROM
WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid;
master_append_table_to_shard
------------------------------
0.0266667
0.00533333
(1 row)

SELECT * FROM multi_append_table_to_shard_date;
@ -174,7 +174,7 @@ FROM
WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid;
master_append_table_to_shard
------------------------------
0.0266667
0.00533333
(1 row)

SELECT * FROM multi_append_table_to_shard_date;
@ -194,7 +194,7 @@ FROM
WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid;
master_append_table_to_shard
------------------------------
0.0266667
0.00533333
(1 row)

ROLLBACK;

@ -433,7 +433,7 @@ UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "1MB";
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'

@ -70,8 +70,8 @@ SELECT
min(l_custkey), max(l_custkey)
FROM
multi_outer_join_left a LEFT JOIN multi_outer_join_third b ON (l_custkey = t_custkey);
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
ERROR: cannot run outer join query if join is not on the partition column
DETAIL: Outer joins requiring repartitioning are not supported.
SELECT
min(t_custkey), max(t_custkey)
FROM
@ -187,8 +187,8 @@ SELECT
min(r_custkey), max(r_custkey)
FROM
multi_outer_join_left a RIGHT JOIN multi_outer_join_right b ON (l_custkey = r_custkey);
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
ERROR: cannot run outer join query if join is not on the partition column
DETAIL: Outer joins requiring repartitioning are not supported.
-- Reverse right join should be same as left join
SELECT
min(l_custkey), max(l_custkey)
@ -207,8 +207,8 @@ SELECT
min(l_custkey), max(l_custkey)
FROM
multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey);
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
ERROR: cannot run outer join query if join is not on the partition column
DETAIL: Outer joins requiring repartitioning are not supported.
-- empty tables
SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left');
master_apply_delete_command
@ -223,10 +223,10 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right');
(1 row)

-- reload shards with 1-1 matching
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-subset-11-20.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-subset-21-30.data' with delimiter '|'
-- multi_outer_join_third is a single shard table
-- Regular left join should work as expected
SELECT
@ -236,7 +236,7 @@ FROM
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ]
min | max
-----+-----
1 | 30
11 | 30
(1 row)

-- Since we cannot broadcast or re-partition, joining on a different key should error out
@ -256,7 +256,7 @@ WHERE
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ]
min | max
-----+-----
1 | 10
23 | 29
(1 row)

-- Partial anti-join with specific value (5, 11-15)
@ -269,7 +269,7 @@ WHERE
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ]
min | max
-----+-----
1 | 15
23 | 29
(1 row)

-- This query is an INNER JOIN in disguise since there cannot be NULL results (21)
@ -295,7 +295,7 @@ FROM
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ]
count | count
-------+-------
25 | 1
17 | 1
(1 row)

-- Right join should be allowed in this case
@ -317,7 +317,7 @@ FROM
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ]
min | max
-----+-----
1 | 30
11 | 30
(1 row)

-- complex query tree should error out
@ -350,32 +350,24 @@ FROM
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ][ broadcast join "multi_outer_join_third" ]
l_custkey | r_custkey | t_custkey
-----------+-----------+-----------
1 | |
2 | |
3 | |
4 | |
5 | |
6 | |
7 | |
8 | |
9 | |
10 | |
11 | 11 | 11
12 | 12 | 12
13 | 13 | 13
14 | 14 | 14
15 | 15 | 15
16 | 16 | 16
17 | 17 | 17
18 | 18 | 18
20 | 20 | 20
21 | 21 | 21
22 | 22 | 22
23 | 23 | 23
23 | |
24 | 24 | 24
25 | 25 | 25
25 | |
26 | 26 | 26
27 | 27 | 27
28 | 28 | 28
29 | 29 | 29
29 | |
30 | 30 | 30
(25 rows)
(17 rows)

-- Right join with single shard right most table should error out
SELECT
@ -397,25 +389,22 @@ LOG: join order: [ "multi_outer_join_right" ][ broadcast join "multi_outer_join
-----------+-----------+-----------
11 | 11 | 11
12 | 12 | 12
13 | 13 | 13
13 | 13 |
14 | 14 | 14
15 | 15 | 15
16 | 16 |
17 | 17 |
18 | 18 |
15 | 15 |
16 | 16 | 16
17 | 17 | 17
18 | 18 | 18
19 | 19 |
20 | 20 |
20 | 20 | 20
21 | 21 | 21
22 | 22 | 22
23 | 23 | 23
24 | 24 | 24
25 | 25 | 25
26 | 26 | 26
27 | 27 | 27
28 | 28 | 28
29 | 29 | 29
30 | 30 | 30
(20 rows)
(17 rows)

-- Make it anti-join, should display values with l_custkey is null
SELECT
@ -429,12 +418,10 @@ WHERE
LOG: join order: [ "multi_outer_join_right" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_left" ]
t_custkey | r_custkey | l_custkey
-----------+-----------+-----------
16 | 16 |
17 | 17 |
18 | 18 |
13 | 13 |
15 | 15 |
19 | 19 |
20 | 20 |
(5 rows)
(3 rows)

-- Cascading right join with single shard left most table should error out
SELECT
@ -453,37 +440,27 @@ FROM
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ]
l_custkey | r_custkey
-----------+-----------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | 11
12 | 12
13 | 13
14 | 14
15 | 15
| 20
| 17
| 18
16 | 16
17 | 17
18 | 18
20 | 20
| 15
| 13
| 19
| 16
21 | 21
22 | 22
23 | 23
23 |
24 | 24
25 | 25
25 |
26 | 26
27 | 27
28 | 28
29 | 29
29 |
30 | 30
(30 rows)
(20 rows)

-- full outer join + anti (right) should work with 1-1 matched shards
SELECT
@ -496,17 +473,10 @@ WHERE
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ]
l_custkey | r_custkey
-----------+-----------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
(10 rows)
23 |
25 |
29 |
(3 rows)

-- full outer join + anti (left) should work with 1-1 matched shards
SELECT
@ -519,12 +489,10 @@ WHERE
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ]
l_custkey | r_custkey
-----------+-----------
| 20
| 17
| 18
| 15
| 13
| 19
| 16
(5 rows)
(3 rows)

-- full outer join + anti (both) should work with 1-1 matched shards
SELECT
@ -537,22 +505,13 @@ WHERE
LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ]
l_custkey | r_custkey
-----------+-----------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
| 20
| 17
| 18
| 15
| 13
| 19
| 16
(15 rows)
23 |
25 |
29 |
(6 rows)

-- full outer join should error out for mismatched shards
SELECT
@ -560,8 +519,8 @@ SELECT
FROM
multi_outer_join_left l1
FULL JOIN multi_outer_join_third t1 ON (l1.l_custkey = t1.t_custkey);
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
ERROR: cannot run outer join query if join is not on the partition column
DETAIL: Outer joins requiring repartitioning are not supported.
-- inner join + single shard left join should work
SELECT
l_custkey, r_custkey, t_custkey
@ -574,20 +533,19 @@ LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer
-----------+-----------+-----------
11 | 11 | 11
12 | 12 | 12
13 | 13 | 13
14 | 14 | 14
15 | 15 | 15
16 | 16 | 16
17 | 17 | 17
18 | 18 | 18
20 | 20 | 20
21 | 21 | 21
22 | 22 | 22
23 | 23 | 23
24 | 24 | 24
25 | 25 | 25
26 | 26 | 26
27 | 27 | 27
28 | 28 | 28
29 | 29 | 29
30 | 30 | 30
(15 rows)
(14 rows)

-- inner (broadcast) join + 2 shards left (local) join should work
SELECT
@ -599,32 +557,24 @@ FROM
LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_right" ]
l_custkey | t_custkey | r_custkey
-----------+-----------+-----------
1 | 1 |
2 | 2 |
3 | 3 |
4 | 4 |
5 | 5 |
6 | 6 |
7 | 7 |
8 | 8 |
9 | 9 |
10 | 10 |
11 | 11 | 11
12 | 12 | 12
13 | 13 | 13
14 | 14 | 14
15 | 15 | 15
16 | 16 | 16
17 | 17 | 17
18 | 18 | 18
20 | 20 | 20
21 | 21 | 21
22 | 22 | 22
23 | 23 | 23
23 | 23 |
24 | 24 | 24
25 | 25 | 25
25 | 25 |
26 | 26 | 26
27 | 27 | 27
28 | 28 | 28
29 | 29 | 29
29 | 29 |
30 | 30 | 30
(25 rows)
(17 rows)

-- inner (local) join + 2 shards left (dual partition) join should error out
SELECT
@ -633,8 +583,8 @@ FROM
multi_outer_join_third t1
INNER JOIN multi_outer_join_left l1 ON (l1.l_custkey = t1.t_custkey)
LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey);
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
ERROR: cannot run outer join query if join is not on the partition column
DETAIL: Outer joins requiring repartitioning are not supported.
-- inner (local) join + 2 shards left (dual partition) join should error out
SELECT
l_custkey, t_custkey, r_custkey
@ -645,32 +595,24 @@ FROM
LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_right" ]
l_custkey | t_custkey | r_custkey
-----------+-----------+-----------
1 | 1 |
2 | 2 |
3 | 3 |
4 | 4 |
5 | 5 |
6 | 6 |
7 | 7 |
8 | 8 |
9 | 9 |
10 | 10 |
11 | 11 | 11
12 | 12 | 12
13 | 13 | 13
14 | 14 | 14
15 | 15 | 15
16 | 16 | 16
17 | 17 | 17
18 | 18 | 18
20 | 20 | 20
21 | 21 | 21
22 | 22 | 22
23 | 23 | 23
23 | 23 |
24 | 24 | 24
25 | 25 | 25
25 | 25 |
26 | 26 | 26
27 | 27 | 27
28 | 28 | 28
29 | 29 | 29
29 | 29 |
30 | 30 | 30
(25 rows)
(17 rows)

-- inner (broadcast) join + 2 shards left (local) + anti join should work
SELECT
@ -684,17 +626,10 @@ WHERE
LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_right" ]
l_custkey | t_custkey | r_custkey
-----------+-----------+-----------
1 | 1 |
2 | 2 |
3 | 3 |
4 | 4 |
5 | 5 |
6 | 6 |
7 | 7 |
8 | 8 |
9 | 9 |
10 | 10 |
(10 rows)
23 | 23 |
25 | 25 |
29 | 29 |
(3 rows)

-- Test joinExpr aliases by performing an outer-join.
SELECT
@ -719,15 +654,12 @@ LOG: join order: [ "multi_outer_join_right" ][ local partition join "multi_oute
20
21
22
23
24
25
26
27
28
29
30
(20 rows)
(17 rows)

-- flattened out subqueries with outer joins are not supported
SELECT
@ -755,20 +687,20 @@ SELECT
min(l_custkey), max(l_custkey)
FROM
multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey);
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
ERROR: cannot run outer join query if join is not on the partition column
DETAIL: Outer joins requiring repartitioning are not supported.
SELECT
min(l_custkey), max(l_custkey)
FROM
multi_outer_join_left a RIGHT JOIN multi_outer_join_right b ON (l_custkey = r_custkey);
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
ERROR: cannot run outer join query if join is not on the partition column
DETAIL: Outer joins requiring repartitioning are not supported.
SELECT
min(l_custkey), max(l_custkey)
FROM
multi_outer_join_left a FULL JOIN multi_outer_join_right b ON (l_custkey = r_custkey);
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
ERROR: cannot run outer join query if join is not on the partition column
DETAIL: Outer joins requiring repartitioning are not supported.
SELECT
t_custkey
FROM
@ -776,33 +708,23 @@ FROM
LEFT OUTER JOIN multi_outer_join_left l1 ON (l1.l_custkey = r1.r_custkey)) AS
test(c_custkey, c_nationkey)
INNER JOIN multi_outer_join_third t1 ON (test.c_custkey = t1.t_custkey);
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
ERROR: cannot run outer join query if join is not on the partition column
DETAIL: Outer joins requiring repartitioning are not supported.
-- simple test to ensure anti-joins work with hash-partitioned tables
CREATE TABLE left_values(val int);
SELECT master_create_distributed_table('left_values', 'val', 'hash');
master_create_distributed_table
---------------------------------

(1 row)

SELECT master_create_worker_shards('left_values', 16, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_count to 16;
SET citus.shard_replication_factor to 1;
SELECT create_distributed_table('left_values', 'val');
create_distributed_table
--------------------------

(1 row)

\copy left_values from stdin
CREATE TABLE right_values(val int);
SELECT master_create_distributed_table('right_values', 'val', 'hash');
master_create_distributed_table
---------------------------------

(1 row)

SELECT master_create_worker_shards('right_values', 16, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('right_values', 'val');
create_distributed_table
--------------------------

(1 row)

@ -259,7 +259,7 @@ push(@pgOptions, '-c', "wal_level=logical");

# Citus options set for the tests
push(@pgOptions, '-c', "citus.shard_count=4");
push(@pgOptions, '-c', "citus.shard_max_size=300kB");
push(@pgOptions, '-c', "citus.shard_max_size=1500kB");
push(@pgOptions, '-c', "citus.max_running_tasks_per_node=4");
push(@pgOptions, '-c', "citus.expire_cached_shards=on");
push(@pgOptions, '-c', "citus.task_tracker_delay=10ms");

@ -3,6 +3,7 @@
--

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100000;

-- Create new table definitions for use in testing in distributed planning and
-- execution functionality. Also create indexes to boost performance.

@ -6,6 +6,8 @@

SET citus.next_shard_id TO 630000;
SET citus.shard_count to 4;
SET citus.shard_replication_factor to 1;

-- Create a table partitioned on integer column and update partition type to
-- hash. Then load data into this table and update shard min max values with
@ -22,8 +24,7 @@ CREATE TABLE orders_hash_partitioned (
o_clerk char(15),
o_shippriority integer,
o_comment varchar(79) );
SELECT master_create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'hash');
SELECT master_create_worker_shards('orders_hash_partitioned', 4, 1);
SELECT create_distributed_table('orders_hash_partitioned', 'o_orderkey');

SET client_min_messages TO DEBUG2;

@ -11,6 +11,8 @@ SET citus.explain_distributed_queries TO off;
SET citus.log_multi_join_order TO TRUE;
SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise
SET client_min_messages TO DEBUG2;
SET citus.shard_count to 2;
SET citus.shard_replication_factor to 1;

-- Create new table definitions for use in testing in distributed planning and
-- execution functionality. Also create indexes to boost performance.
@ -33,8 +35,7 @@ CREATE TABLE lineitem_hash (
l_shipmode char(10) not null,
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
SELECT master_create_worker_shards('lineitem_hash', 2, 1);
SELECT create_distributed_table('lineitem_hash', 'l_orderkey');

CREATE INDEX lineitem_hash_time_index ON lineitem_hash (l_shipdate);

@ -49,8 +50,7 @@ CREATE TABLE orders_hash (
o_shippriority integer not null,
o_comment varchar(79) not null,
PRIMARY KEY(o_orderkey) );
SELECT master_create_distributed_table('orders_hash', 'o_orderkey', 'hash');
SELECT master_create_worker_shards('orders_hash', 2, 1);
SELECT create_distributed_table('orders_hash', 'o_orderkey');

CREATE TABLE customer_hash (
c_custkey integer not null,
@ -61,8 +61,7 @@ CREATE TABLE customer_hash (
c_acctbal decimal(15,2) not null,
c_mktsegment char(10) not null,
c_comment varchar(117) not null);
SELECT master_create_distributed_table('customer_hash', 'c_custkey', 'hash');
SELECT master_create_worker_shards('customer_hash', 2, 1);
SELECT create_distributed_table('customer_hash', 'c_custkey');

-- The following query checks that we can correctly handle self-joins

@ -699,6 +699,10 @@ DROP TRIGGER reject_bad_reference ON reference_modifying_xacts_1200006;
-- now create a hash distributed table and run tests
-- including both the reference table and the hash
-- distributed table

-- To prevent colocating a hash table with append table
DELETE FROM pg_dist_colocation WHERE colocationid = 100001;

SET citus.next_shard_id TO 1200007;
SET citus.shard_count = 4;
SET citus.shard_replication_factor = 1;

@ -20,7 +20,7 @@ CREATE OPERATOR = (
|
|||
LEFTARG = test_udt,
|
||||
RIGHTARG = test_udt,
|
||||
PROCEDURE = equal_test_udt_function,
|
||||
COMMUTATOR = =,
|
||||
COMMUTATOR = =,
|
||||
HASHES
|
||||
);
|
||||
|
||||
|
@ -50,15 +50,15 @@ FUNCTION 1 test_udt_hash(test_udt);
 -- END type creation
 
 CREATE TABLE repartition_udt (
-    pk integer not null,
-    udtcol test_udt,
-    txtcol text
+    pk integer not null,
+    udtcol test_udt,
+    txtcol text
 );
 
 CREATE TABLE repartition_udt_other (
-    pk integer not null,
-    udtcol test_udt,
-    txtcol text
+    pk integer not null,
+    udtcol test_udt,
+    txtcol text
 );
 
 -- Connect directly to a worker, create and drop the type, then
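
For the planner to repartition rows on udtcol, values of test_udt must be hashable: the test pairs an equality operator marked HASHES with a default hash operator class whose support function is test_udt_hash. A condensed, illustrative sketch of that plumbing; the type body and function bodies here are assumptions, not copied from the test:

CREATE TYPE test_udt AS (i integer, i2 integer);

CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean
AS 'SELECT $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE STRICT;

CREATE FUNCTION test_udt_hash(test_udt) RETURNS integer
AS 'SELECT hashint4($1.i) # hashint4($1.i2);' LANGUAGE SQL IMMUTABLE STRICT;

CREATE OPERATOR = (
    LEFTARG = test_udt,
    RIGHTARG = test_udt,
    PROCEDURE = equal_test_udt_function,
    COMMUTATOR = =,
    HASHES
);

-- the operator class makes the hash usable for hash joins and, in Citus,
-- for hash-repartitioning tuples on a test_udt column
CREATE OPERATOR CLASS test_udt_hash_ops
DEFAULT FOR TYPE test_udt USING hash AS
OPERATOR 1 = (test_udt, test_udt),
FUNCTION 1 test_udt_hash(test_udt);
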
@ -86,7 +86,7 @@ CREATE OPERATOR = (
     LEFTARG = test_udt,
     RIGHTARG = test_udt,
     PROCEDURE = equal_test_udt_function,
-    COMMUTATOR = =,
+    COMMUTATOR = =,
     HASHES
 );
 
@ -133,7 +133,7 @@ CREATE OPERATOR = (
     LEFTARG = test_udt,
     RIGHTARG = test_udt,
     PROCEDURE = equal_test_udt_function,
-    COMMUTATOR = =,
+    COMMUTATOR = =,
     HASHES
 );
 
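
The same operator block appears three times in this file: once for the coordinator and once per worker. In this era of Citus, CREATE TYPE and CREATE OPERATOR DDL was not propagated to worker nodes automatically, so every node needs its own copy before repartition tasks can evaluate equality there. Schematically, with the elided DDL being the block above:

\c - - - :worker_1_port
-- CREATE TYPE test_udt ..., CREATE FUNCTION ..., CREATE OPERATOR = ( ... HASHES ), as above
\c - - - :worker_2_port
-- CREATE TYPE test_udt ..., CREATE FUNCTION ..., CREATE OPERATOR = ( ... HASHES ), as above
\c - - - :master_port
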
@ -197,7 +197,7 @@ SET client_min_messages = LOG;
 -- manually make the query router plannable.
 SELECT * FROM repartition_udt JOIN repartition_udt_other
     ON repartition_udt.pk = repartition_udt_other.pk
-    WHERE repartition_udt.pk = 1;
+    WHERE repartition_udt.pk = 1;
 
 -- Query that should result in a repartition join on UDT column.
 SET citus.large_table_shard_count = 1;
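
A query is router-plannable when its filters pin every table to a single shard: pk is the distribution column of both tables, so pk = 1 maps each side to one shard and the whole join is shipped to a single worker. A hypothetical way to verify this is to EXPLAIN the query and look for a single task rather than a repartition job:

EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other
    ON repartition_udt.pk = repartition_udt_other.pk
    WHERE repartition_udt.pk = 1;
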
@ -206,12 +206,12 @@ SET citus.log_multi_join_order = true;
 
 EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other
     ON repartition_udt.udtcol = repartition_udt_other.udtcol
-    WHERE repartition_udt.pk > 1;
+    WHERE repartition_udt.pk > 1;
 
 SELECT * FROM repartition_udt JOIN repartition_udt_other
     ON repartition_udt.udtcol = repartition_udt_other.udtcol
-    WHERE repartition_udt.pk > 1
-    ORDER BY repartition_udt.pk;
-
+    WHERE repartition_udt.pk > 1
+    ORDER BY repartition_udt.pk;
+
 \c - - - :worker_1_port
 \c - - - :worker_2_port
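
Joining on udtcol instead leaves neither side joined on its distribution column, so with citus.log_multi_join_order enabled the planner should report a dual partition join: both inputs are re-hashed on the UDT column (via test_udt_hash) and matching partitions are joined pairwise. Roughly, assuming the log format of this era:

SET citus.log_multi_join_order TO on;
SELECT * FROM repartition_udt JOIN repartition_udt_other
    ON repartition_udt.udtcol = repartition_udt_other.udtcol
    WHERE repartition_udt.pk > 1
    ORDER BY repartition_udt.pk;
-- LOG:  join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ]
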
@ -21,6 +21,8 @@ SET citus.task_executor_type TO 'real-time';
 -- Change configuration to treat lineitem and orders tables as large
 
 SET citus.large_table_shard_count TO 2;
+SET citus.log_multi_join_order to true;
+SET citus.enable_repartition_joins to ON;
 
 SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
 SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
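
pg_dist_shard records, per shard, the range of partition-column values the shard may contain; partition and join pruning compare query constraints against shardminvalue and shardmaxvalue, which is why this test mutates exactly those two rows. A sketch of the metadata being exercised:

SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'lineitem'::regclass
ORDER BY shardid;
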
@ -35,7 +37,8 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
     WHERE l_orderkey = o_orderkey;
 
 -- Now set the minimum value for a shard to null. Then check that we don't apply
--- partition or join pruning for the shard with null min value.
+-- partition or join pruning for the shard with null min value. Since it is not
+-- supported with single-repartition join, dual-repartition has been used.
 
 UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
 
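
The rewritten comments make the intent explicit: with this case apparently unsupported on the single-repartition path after the broadcast-join removal, the test switches the join clause so that a dual repartition join is chosen, where both inputs are re-hashed on the join columns. The clause below forces that choice because neither l_partkey nor o_custkey is its table's distribution column:

SET citus.enable_repartition_joins TO on;
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
    WHERE l_partkey = o_custkey;
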
@ -44,10 +47,11 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
 
 EXPLAIN (COSTS FALSE)
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
-    WHERE l_orderkey = o_orderkey;
+    WHERE l_partkey = o_custkey;
 
 -- Next, set the maximum value for another shard to null. Then check that we
--- don't apply partition or join pruning for this other shard either.
+-- don't apply partition or join pruning for this other shard either. Since it
+-- is not supported with single-repartition join, dual-repartition has been used.
 
 UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
 
@ -56,10 +60,11 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
 
 EXPLAIN (COSTS FALSE)
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
-    WHERE l_orderkey = o_orderkey;
+    WHERE l_partkey = o_custkey;
 
 -- Last, set the minimum value to 0 and check that we don't treat it as null. We
--- should apply partition and join pruning for this shard now.
+-- should apply partition and join pruning for this shard now. Since it is not
+-- supported with single-repartition join, dual-repartition has been used.
 
 UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
 
@ -68,11 +73,11 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
 
 EXPLAIN (COSTS FALSE)
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
-    WHERE l_orderkey = o_orderkey;
+    WHERE l_partkey = o_custkey;
 
 -- Set minimum and maximum values for two shards back to their original values
 
 UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000;
-UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001;
+UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001;
 
 SET client_min_messages TO NOTICE;
@ -9,7 +9,7 @@
 SET citus.next_shard_id TO 770000;
 
 -- Adding additional l_orderkey = 1 to make this query not router executable
-SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
+SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1 ORDER BY 1,2;
 
 -- We use the l_linenumber field for the following aggregations. We need to use
 -- an integer type, as aggregations on numerics or big integers return numerics
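
OR-ing in l_orderkey = 1 makes the query span two shards, so it can no longer be router-executed as a single task, and the added ORDER BY 1,2 pins the output order: rows from a multi-shard query otherwise arrive in whichever order the per-shard tasks complete, which would make the expected-output file flaky. The contrast, schematically:

SELECT l_orderkey, l_linenumber FROM lineitem WHERE l_orderkey = 9030;  -- one shard: router executable
SELECT l_orderkey, l_linenumber FROM lineitem
    WHERE l_orderkey = 9030 OR l_orderkey = 1 ORDER BY 1, 2;  -- two shards: multi-task, ordered
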
@ -559,6 +559,8 @@ SELECT master_apply_delete_command('DELETE FROM nation_append') ;
 -- create necessary objects and load data to them
 CREATE SCHEMA test_schema_support_join_1;
 CREATE SCHEMA test_schema_support_join_2;
+SET citus.shard_count to 4;
+SET citus.shard_replication_factor to 1;
 
 CREATE TABLE test_schema_support_join_1.nation_hash (
     n_nationkey integer not null,
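
Schema-qualified names work throughout the distribution API, so each schema gets its own nation_hash distributed on n_nationkey with the same shard count and replication factor. That should place the tables in one colocation group, letting a join on the distribution column run as a local (colocated) join, which is exactly the property the colocation check in this PR guards. A sketch:

SELECT count(*)
FROM test_schema_support_join_1.nation_hash n1
JOIN test_schema_support_join_2.nation_hash n2
    ON n1.n_nationkey = n2.n_nationkey;
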
@ -578,8 +580,7 @@ CREATE TABLE test_schema_support_join_2.nation_hash (
     n_regionkey integer not null,
     n_comment varchar(152));
 
-SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey', 'hash');
-SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4, 1);
+SELECT create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey');
 
 \copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|';
 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
@ -590,8 +591,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4,
 5|ETHIOPIA|0|ven packages wake quickly. regu
 \.
 
-SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey', 'hash');
-SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4, 1);
+SELECT create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey');
 
 \copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|';
 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
@ -602,8 +602,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4
 5|ETHIOPIA|0|ven packages wake quickly. regu
 \.
 
-SELECT master_create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey', 'hash');
-SELECT master_create_worker_shards('test_schema_support_join_2.nation_hash', 4, 1);
+SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey');
 
 \copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|';
 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
@ -783,3 +782,5 @@ SELECT sum(result::int) FROM run_command_on_shards('run_test_schema.test_table',
 
 -- Clean up the created schema
 DROP SCHEMA run_test_schema CASCADE;
+DROP SCHEMA test_schema_support_join_1 CASCADE;
+DROP SCHEMA test_schema_support_join_2 CASCADE;