Replace \stage with \copy in regression tests

Fixes #547

This change removes all references to \stage in the regression tests
and uses \copy instead. Doing so changed the shard counts and the
min/max shard values of some test tables (lineitem, orders, etc.).
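
As an illustrative sketch of the substitution (the exact lines appear in the lineitem_alter hunks below), a staging command such as

    \STAGE lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'

is rewritten as the equivalent client-side copy:

    \COPY lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'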
pull/630/head
Eren Basak 2016-06-27 07:23:08 +03:00 committed by Jason Petersen
parent 010cbf16fc
commit b513f1c911
35 changed files with 478 additions and 325 deletions


@ -98,10 +98,10 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_agg(l_orderkey) FROM li
GROUP BY l_quantity ORDER BY l_quantity;
l_quantity | count | avg | array_agg
------------+-------+-----------------------+--------------------------------------------------------------------------------------------------
1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476}
2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476}
3.00 | 14 | 4714.0392857142857143 | {5509,5543,5605,5606,5827,9124,9157,9184,9223,9254,9349,9414,9475,9477}
4.00 | 19 | 5929.7136842105263158 | {5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985,9091,9120,9281,9347,9382,9440,9473}
1.00 | 17 | 1477.1258823529411765 | {8997,9026,9158,9184,9220,9222,9348,9383,9476,5543,5633,5634,5698,5766,5856,5857,5986}
2.00 | 19 | 3078.4242105263157895 | {9030,9058,9123,9124,9188,9344,9441,9476,5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923}
3.00 | 14 | 4714.0392857142857143 | {9124,9157,9184,9223,9254,9349,9414,9475,9477,5509,5543,5605,5606,5827}
4.00 | 19 | 5929.7136842105263158 | {9091,9120,9281,9347,9382,9440,9473,5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985}
(4 rows)
SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month
@ -109,10 +109,10 @@ SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month
AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
l_quantity | my_month
------------+------------------------------------------------
1.00 | {9,5,7,5,9,11,11,4,7,7,4,7,4,2,6,3,5}
2.00 | {11,10,8,5,5,12,3,11,7,11,5,7,6,6,10,1,12,6,5}
3.00 | {4,9,8,11,7,10,6,7,8,5,8,9,11,3}
4.00 | {1,5,6,11,12,10,9,6,1,2,5,1,11,6,2,8,2,6,10}
1.00 | {7,7,4,7,4,2,6,3,5,9,5,7,5,9,11,11,4}
2.00 | {7,6,6,10,1,12,6,5,11,10,8,5,5,12,3,11,7,11,5}
3.00 | {10,6,7,8,5,8,9,11,3,4,9,8,11,7}
4.00 | {11,6,2,8,2,6,10,1,5,6,11,12,10,9,6,1,2,5,1}
(4 rows)
SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity < 5
@ -120,10 +120,10 @@ SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity
AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
l_quantity | array_agg
------------+---------------------------------------------
1.00 | {11269,11397,11713,11715,11973,18317,18445}
2.00 | {11847,18061,18247,18953}
1.00 | {18317,18445,11269,11397,11713,11715,11973}
2.00 | {18061,18247,18953,11847}
3.00 | {18249,18315,18699,18951,18955}
4.00 | {11653,11659,18241,18765}
4.00 | {18241,18765,11653,11659}
(4 rows)
-- Check that we can execute array_agg() with an expression containing NULL values


@ -390,7 +390,7 @@ ORDER BY
customer_keys.o_custkey DESC
LIMIT 10 OFFSET 20;
DEBUG: push down of limit count: 30
DEBUG: building index "pg_toast_16992_index" on table "pg_toast_16992"
DEBUG: building index "pg_toast_17021_index" on table "pg_toast_17021"
o_custkey | total_order_count
-----------+-------------------
1466 | 1


@ -34,8 +34,8 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Distributed Query into pg_merge_job_570000
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> HashAggregate
@ -55,8 +55,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
{
"Executor": "Real-Time",
"Job": {
"Task Count": 6,
"Tasks Shown": "One of 6",
"Task Count": 8,
"Tasks Shown": "One of 8",
"Tasks": [
{
"Node": "host=localhost port=57637 dbname=regression",
@ -122,8 +122,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Distributed-Query>
<Executor>Real-Time</Executor>
<Job>
<Task-Count>6</Task-Count>
<Tasks-Shown>One of 6</Tasks-Shown>
<Task-Count>8</Task-Count>
<Tasks-Shown>One of 8</Tasks-Shown>
<Tasks>
<Task>
<Node>host=localhost port=57637 dbname=regression</Node>
@ -193,8 +193,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
- Executor: "Real-Time"
Job:
Task Count: 6
Tasks Shown: "One of 6"
Task Count: 8
Tasks Shown: "One of 8"
Tasks:
- Node: "host=localhost port=57637 dbname=regression"
Remote Plan:
@ -232,8 +232,8 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Distributed Query into pg_merge_job_570006
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> HashAggregate
@ -250,8 +250,8 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
Distributed Query into pg_merge_job_570007
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
@ -270,8 +270,8 @@ EXPLAIN (COSTS FALSE)
ORDER BY l_quantity LIMIT 10;
Distributed Query into pg_merge_job_570008
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> Limit
@ -282,7 +282,7 @@ Distributed Query into pg_merge_job_570008
-> Seq Scan on lineitem_290000 lineitem
Filter: (l_quantity < 5.0)
-> Hash
-> Seq Scan on orders_290006 orders
-> Seq Scan on orders_290008 orders
Master Query
-> Limit
-> Sort
@ -357,8 +357,8 @@ EXPLAIN (COSTS FALSE)
SELECT * FROM lineitem;
Distributed Query into pg_merge_job_570012
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> Seq Scan on lineitem_290000 lineitem
@ -370,7 +370,7 @@ EXPLAIN (COSTS FALSE)
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
Distributed Query into pg_merge_job_570013
Executor: Real-Time
Task Count: 3
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
@ -380,12 +380,17 @@ Distributed Query into pg_merge_job_570013
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290003 lineitem
-> Seq Scan on lineitem_290005 lineitem
Filter: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290005 lineitem
-> Seq Scan on lineitem_290006 lineitem
Filter: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290007 lineitem
Filter: (l_orderkey > 9030)
Master Query
-> Aggregate
@ -403,8 +408,8 @@ EXPLAIN (COSTS FALSE)
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
Distributed Query into pg_merge_job_570016
Executor: Task-Tracker
Task Count: 3
Tasks Shown: One of 3
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
@ -429,7 +434,7 @@ Distributed Query into pg_merge_job_570019
Map Task Count: 1
Merge Task Count: 1
-> MapMergeJob
Map Task Count: 6
Map Task Count: 8
Merge Task Count: 1
Master Query
-> Aggregate
@ -452,7 +457,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Merge Task Count": 1,
"Depended Jobs": [
{
"Map Task Count": 6,
"Map Task Count": 8,
"Merge Task Count": 1
}
]
@ -502,7 +507,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Merge-Task-Count>1</Merge-Task-Count>
<Depended-Jobs>
<MapMergeJob>
<Map-Task-Count>6</Map-Task-Count>
<Map-Task-Count>8</Map-Task-Count>
<Merge-Task-Count>1</Merge-Task-Count>
</MapMergeJob>
</Depended-Jobs>
@ -548,7 +553,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Map Task Count: 1
Merge Task Count: 1
Depended Jobs:
- Map Task Count: 6
- Map Task Count: 8
Merge Task Count: 1
Master Query:
- Plan:


@ -99,34 +99,50 @@ SELECT master_create_worker_shards('customer_hash', 2, 1);
EXPLAIN SELECT l1.l_quantity FROM lineitem l1, lineitem l2
WHERE l1.l_orderkey = l2.l_orderkey AND l1.l_quantity > 5;
LOG: join order: [ "lineitem" ][ local partition join "lineitem" ]
DEBUG: join prunable for intervals [1,2496] and [2497,4964]
DEBUG: join prunable for intervals [1,2496] and [4965,5986]
DEBUG: join prunable for intervals [1,2496] and [8997,11554]
DEBUG: join prunable for intervals [1,2496] and [11554,13920]
DEBUG: join prunable for intervals [1,2496] and [13921,14947]
DEBUG: join prunable for intervals [2497,4964] and [1,2496]
DEBUG: join prunable for intervals [2497,4964] and [4965,5986]
DEBUG: join prunable for intervals [2497,4964] and [8997,11554]
DEBUG: join prunable for intervals [2497,4964] and [11554,13920]
DEBUG: join prunable for intervals [2497,4964] and [13921,14947]
DEBUG: join prunable for intervals [4965,5986] and [1,2496]
DEBUG: join prunable for intervals [4965,5986] and [2497,4964]
DEBUG: join prunable for intervals [4965,5986] and [8997,11554]
DEBUG: join prunable for intervals [4965,5986] and [11554,13920]
DEBUG: join prunable for intervals [4965,5986] and [13921,14947]
DEBUG: join prunable for intervals [8997,11554] and [1,2496]
DEBUG: join prunable for intervals [8997,11554] and [2497,4964]
DEBUG: join prunable for intervals [8997,11554] and [4965,5986]
DEBUG: join prunable for intervals [8997,11554] and [13921,14947]
DEBUG: join prunable for intervals [11554,13920] and [1,2496]
DEBUG: join prunable for intervals [11554,13920] and [2497,4964]
DEBUG: join prunable for intervals [11554,13920] and [4965,5986]
DEBUG: join prunable for intervals [11554,13920] and [13921,14947]
DEBUG: join prunable for intervals [13921,14947] and [1,2496]
DEBUG: join prunable for intervals [13921,14947] and [2497,4964]
DEBUG: join prunable for intervals [13921,14947] and [4965,5986]
DEBUG: join prunable for intervals [13921,14947] and [8997,11554]
DEBUG: join prunable for intervals [13921,14947] and [11554,13920]
DEBUG: join prunable for intervals [1,1509] and [2951,4455]
DEBUG: join prunable for intervals [1,1509] and [4480,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,10560]
DEBUG: join prunable for intervals [1,1509] and [10560,12036]
DEBUG: join prunable for intervals [1,1509] and [12036,13473]
DEBUG: join prunable for intervals [1,1509] and [13473,14947]
DEBUG: join prunable for intervals [1509,4964] and [8997,10560]
DEBUG: join prunable for intervals [1509,4964] and [10560,12036]
DEBUG: join prunable for intervals [1509,4964] and [12036,13473]
DEBUG: join prunable for intervals [1509,4964] and [13473,14947]
DEBUG: join prunable for intervals [2951,4455] and [1,1509]
DEBUG: join prunable for intervals [2951,4455] and [4480,5986]
DEBUG: join prunable for intervals [2951,4455] and [8997,10560]
DEBUG: join prunable for intervals [2951,4455] and [10560,12036]
DEBUG: join prunable for intervals [2951,4455] and [12036,13473]
DEBUG: join prunable for intervals [2951,4455] and [13473,14947]
DEBUG: join prunable for intervals [4480,5986] and [1,1509]
DEBUG: join prunable for intervals [4480,5986] and [2951,4455]
DEBUG: join prunable for intervals [4480,5986] and [8997,10560]
DEBUG: join prunable for intervals [4480,5986] and [10560,12036]
DEBUG: join prunable for intervals [4480,5986] and [12036,13473]
DEBUG: join prunable for intervals [4480,5986] and [13473,14947]
DEBUG: join prunable for intervals [8997,10560] and [1,1509]
DEBUG: join prunable for intervals [8997,10560] and [1509,4964]
DEBUG: join prunable for intervals [8997,10560] and [2951,4455]
DEBUG: join prunable for intervals [8997,10560] and [4480,5986]
DEBUG: join prunable for intervals [8997,10560] and [12036,13473]
DEBUG: join prunable for intervals [8997,10560] and [13473,14947]
DEBUG: join prunable for intervals [10560,12036] and [1,1509]
DEBUG: join prunable for intervals [10560,12036] and [1509,4964]
DEBUG: join prunable for intervals [10560,12036] and [2951,4455]
DEBUG: join prunable for intervals [10560,12036] and [4480,5986]
DEBUG: join prunable for intervals [10560,12036] and [13473,14947]
DEBUG: join prunable for intervals [12036,13473] and [1,1509]
DEBUG: join prunable for intervals [12036,13473] and [1509,4964]
DEBUG: join prunable for intervals [12036,13473] and [2951,4455]
DEBUG: join prunable for intervals [12036,13473] and [4480,5986]
DEBUG: join prunable for intervals [12036,13473] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [1,1509]
DEBUG: join prunable for intervals [13473,14947] and [1509,4964]
DEBUG: join prunable for intervals [13473,14947] and [2951,4455]
DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
QUERY PLAN
------------------------------------------------------------
explain statements for distributed queries are not enabled


@ -11,12 +11,14 @@ SET client_min_messages TO DEBUG2;
SET citus.large_table_shard_count TO 2;
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [1,2496] and [8997,14946]
DEBUG: join prunable for intervals [2497,4964] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429
@ -27,9 +29,11 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: predicate pruning for shardId 290003
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
17996 | 3.0194630872483221
@ -45,6 +49,8 @@ DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
sum | avg
-----+-----
|
@ -58,10 +64,12 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290007
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290009
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-----+-----
|


@ -45,40 +45,48 @@ GROUP BY
ORDER BY
l_partkey, o_orderkey;
DEBUG: StartTransactionCommand
DEBUG: join prunable for intervals [1,2496] and [8997,14946]
DEBUG: join prunable for intervals [2497,4964] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
DEBUG: join prunable for intervals [1509,4964] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
DEBUG: generated sql query for job 1250 and task 3
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 6
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 9
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 12
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 15
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 18
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 21
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 24
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 15 to node localhost:57637
DEBUG: assigned task 18 to node localhost:57638
DEBUG: assigned task 21 to node localhost:57637
DEBUG: assigned task 24 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for job 1251 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_290010 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000025".intermediate_column_1250_0, "pg_merge_job_1250.task_000025".intermediate_column_1250_1, "pg_merge_job_1250.task_000025".intermediate_column_1250_2, "pg_merge_job_1250.task_000025".intermediate_column_1250_3, "pg_merge_job_1250.task_000025".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000025 "pg_merge_job_1250.task_000025" JOIN part_290012 part ON (("pg_merge_job_1250.task_000025".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for job 1251 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_280002 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000034".intermediate_column_1250_0, "pg_merge_job_1250.task_000034".intermediate_column_1250_1, "pg_merge_job_1250.task_000034".intermediate_column_1250_2, "pg_merge_job_1250.task_000034".intermediate_column_1250_3, "pg_merge_job_1250.task_000034".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000034 "pg_merge_job_1250.task_000034" JOIN part_280002 part ON (("pg_merge_job_1250.task_000034".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 34
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
@ -88,7 +96,7 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for job 1252 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290008 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1"
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1"
DEBUG: generated sql query for job 1252 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1"
DEBUG: generated sql query for job 1252 and task 9
@ -166,16 +174,22 @@ DEBUG: generated sql query for job 1253 and task 10
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 12
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 14
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 16
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 8 to node localhost:57638
DEBUG: assigned task 10 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 14 to node localhost:57637
DEBUG: assigned task 16 to node localhost:57638
DEBUG: generated sql query for job 1254 and task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290006 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
DEBUG: generated sql query for job 1254 and task 4
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290007 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: join prunable for task partitionId 0 and 1
@ -191,27 +205,27 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for job 1255 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000013".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000005".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000013 "pg_merge_job_1253.task_000013" JOIN pg_merge_job_1254.task_000005 "pg_merge_job_1254.task_000005" ON (("pg_merge_job_1253.task_000013".intermediate_column_1253_1 = "pg_merge_job_1254.task_000005".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000013".intermediate_column_1253_0, "pg_merge_job_1254.task_000005".intermediate_column_1254_0"
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000017".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000005".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000017 "pg_merge_job_1253.task_000017" JOIN pg_merge_job_1254.task_000005 "pg_merge_job_1254.task_000005" ON (("pg_merge_job_1253.task_000017".intermediate_column_1253_1 = "pg_merge_job_1254.task_000005".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000017".intermediate_column_1253_0, "pg_merge_job_1254.task_000005".intermediate_column_1254_0"
DEBUG: generated sql query for job 1255 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000020".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000008".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000020 "pg_merge_job_1253.task_000020" JOIN pg_merge_job_1254.task_000008 "pg_merge_job_1254.task_000008" ON (("pg_merge_job_1253.task_000020".intermediate_column_1253_1 = "pg_merge_job_1254.task_000008".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000020".intermediate_column_1253_0, "pg_merge_job_1254.task_000008".intermediate_column_1254_0"
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000026".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000008".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000026 "pg_merge_job_1253.task_000026" JOIN pg_merge_job_1254.task_000008 "pg_merge_job_1254.task_000008" ON (("pg_merge_job_1253.task_000026".intermediate_column_1253_1 = "pg_merge_job_1254.task_000008".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000026".intermediate_column_1253_0, "pg_merge_job_1254.task_000008".intermediate_column_1254_0"
DEBUG: generated sql query for job 1255 and task 9
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000027".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000011".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000027 "pg_merge_job_1253.task_000027" JOIN pg_merge_job_1254.task_000011 "pg_merge_job_1254.task_000011" ON (("pg_merge_job_1253.task_000027".intermediate_column_1253_1 = "pg_merge_job_1254.task_000011".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000027".intermediate_column_1253_0, "pg_merge_job_1254.task_000011".intermediate_column_1254_0"
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000035".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000011".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000035 "pg_merge_job_1253.task_000035" JOIN pg_merge_job_1254.task_000011 "pg_merge_job_1254.task_000011" ON (("pg_merge_job_1253.task_000035".intermediate_column_1253_1 = "pg_merge_job_1254.task_000011".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000035".intermediate_column_1253_0, "pg_merge_job_1254.task_000011".intermediate_column_1254_0"
DEBUG: generated sql query for job 1255 and task 12
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000034".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000014".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000034 "pg_merge_job_1253.task_000034" JOIN pg_merge_job_1254.task_000014 "pg_merge_job_1254.task_000014" ON (("pg_merge_job_1253.task_000034".intermediate_column_1253_1 = "pg_merge_job_1254.task_000014".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000034".intermediate_column_1253_0, "pg_merge_job_1254.task_000014".intermediate_column_1254_0"
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000044".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000014".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000044 "pg_merge_job_1253.task_000044" JOIN pg_merge_job_1254.task_000014 "pg_merge_job_1254.task_000014" ON (("pg_merge_job_1253.task_000044".intermediate_column_1253_1 = "pg_merge_job_1254.task_000014".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000044".intermediate_column_1253_0, "pg_merge_job_1254.task_000014".intermediate_column_1254_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 13
DETAIL: Creating dependency on merge taskId 17
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 20
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 11
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 34
DETAIL: Creating dependency on merge taskId 44
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DEBUG: assigned task 3 to node localhost:57638


@ -42,8 +42,8 @@ FROM
WHERE
o_custkey = c_custkey AND
o_orderkey < 0;
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
DEBUG: predicate pruning for shardId 290008
DEBUG: predicate pruning for shardId 290009
count
-------
@ -58,7 +58,7 @@ FROM
WHERE
o_custkey = c_custkey AND
c_custkey < 0;
DEBUG: predicate pruning for shardId 290008
DEBUG: predicate pruning for shardId 290010
DEBUG: predicate pruning for shardId 280001
DEBUG: predicate pruning for shardId 280000
count
@ -88,19 +88,19 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 13
DETAIL: Creating dependency on merge taskId 17
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 20
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 34
DETAIL: Creating dependency on merge taskId 44
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 19
count
@ -123,6 +123,8 @@ DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
count
-------


@ -72,58 +72,110 @@ DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: join prunable for intervals [1,2496] and [2497,4964]
DEBUG: join prunable for intervals [1,2496] and [4965,5986]
DEBUG: join prunable for intervals [1,2496] and [8997,11554]
DEBUG: join prunable for intervals [1,2496] and [11554,13920]
DEBUG: join prunable for intervals [1,2496] and [13921,14947]
DEBUG: join prunable for intervals [2497,4964] and [1,2496]
DEBUG: join prunable for intervals [2497,4964] and [4965,5986]
DEBUG: join prunable for intervals [2497,4964] and [8997,11554]
DEBUG: join prunable for intervals [2497,4964] and [11554,13920]
DEBUG: join prunable for intervals [2497,4964] and [13921,14947]
DEBUG: join prunable for intervals [4965,5986] and [1,2496]
DEBUG: join prunable for intervals [4965,5986] and [2497,4964]
DEBUG: join prunable for intervals [4965,5986] and [8997,11554]
DEBUG: join prunable for intervals [4965,5986] and [11554,13920]
DEBUG: join prunable for intervals [4965,5986] and [13921,14947]
DEBUG: join prunable for intervals [8997,11554] and [1,2496]
DEBUG: join prunable for intervals [8997,11554] and [2497,4964]
DEBUG: join prunable for intervals [8997,11554] and [4965,5986]
DEBUG: join prunable for intervals [8997,11554] and [13921,14947]
DEBUG: join prunable for intervals [11554,13920] and [1,2496]
DEBUG: join prunable for intervals [11554,13920] and [2497,4964]
DEBUG: join prunable for intervals [11554,13920] and [4965,5986]
DEBUG: join prunable for intervals [11554,13920] and [13921,14947]
DEBUG: join prunable for intervals [13921,14947] and [1,2496]
DEBUG: join prunable for intervals [13921,14947] and [2497,4964]
DEBUG: join prunable for intervals [13921,14947] and [4965,5986]
DEBUG: join prunable for intervals [13921,14947] and [8997,11554]
DEBUG: join prunable for intervals [13921,14947] and [11554,13920]
DEBUG: join prunable for intervals [1,1509] and [2951,4455]
DEBUG: join prunable for intervals [1,1509] and [4480,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,10560]
DEBUG: join prunable for intervals [1,1509] and [10560,12036]
DEBUG: join prunable for intervals [1,1509] and [12036,13473]
DEBUG: join prunable for intervals [1,1509] and [13473,14947]
DEBUG: join prunable for intervals [1509,4964] and [8997,10560]
DEBUG: join prunable for intervals [1509,4964] and [10560,12036]
DEBUG: join prunable for intervals [1509,4964] and [12036,13473]
DEBUG: join prunable for intervals [1509,4964] and [13473,14947]
DEBUG: join prunable for intervals [2951,4455] and [1,1509]
DEBUG: join prunable for intervals [2951,4455] and [4480,5986]
DEBUG: join prunable for intervals [2951,4455] and [8997,10560]
DEBUG: join prunable for intervals [2951,4455] and [10560,12036]
DEBUG: join prunable for intervals [2951,4455] and [12036,13473]
DEBUG: join prunable for intervals [2951,4455] and [13473,14947]
DEBUG: join prunable for intervals [4480,5986] and [1,1509]
DEBUG: join prunable for intervals [4480,5986] and [2951,4455]
DEBUG: join prunable for intervals [4480,5986] and [8997,10560]
DEBUG: join prunable for intervals [4480,5986] and [10560,12036]
DEBUG: join prunable for intervals [4480,5986] and [12036,13473]
DEBUG: join prunable for intervals [4480,5986] and [13473,14947]
DEBUG: join prunable for intervals [8997,10560] and [1,1509]
DEBUG: join prunable for intervals [8997,10560] and [1509,4964]
DEBUG: join prunable for intervals [8997,10560] and [2951,4455]
DEBUG: join prunable for intervals [8997,10560] and [4480,5986]
DEBUG: join prunable for intervals [8997,10560] and [12036,13473]
DEBUG: join prunable for intervals [8997,10560] and [13473,14947]
DEBUG: join prunable for intervals [10560,12036] and [1,1509]
DEBUG: join prunable for intervals [10560,12036] and [1509,4964]
DEBUG: join prunable for intervals [10560,12036] and [2951,4455]
DEBUG: join prunable for intervals [10560,12036] and [4480,5986]
DEBUG: join prunable for intervals [10560,12036] and [13473,14947]
DEBUG: join prunable for intervals [12036,13473] and [1,1509]
DEBUG: join prunable for intervals [12036,13473] and [1509,4964]
DEBUG: join prunable for intervals [12036,13473] and [2951,4455]
DEBUG: join prunable for intervals [12036,13473] and [4480,5986]
DEBUG: join prunable for intervals [12036,13473] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [1,1509]
DEBUG: join prunable for intervals [13473,14947] and [1509,4964]
DEBUG: join prunable for intervals [13473,14947] and [2951,4455]
DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 33
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 40
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 40
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 47
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 19
DETAIL: Creating dependency on merge taskId 47
DETAIL: Creating dependency on merge taskId 33
DEBUG: pruning merge fetch taskId 22
DETAIL: Creating dependency on merge taskId 33
DEBUG: pruning merge fetch taskId 25
DETAIL: Creating dependency on merge taskId 40
DEBUG: pruning merge fetch taskId 28
DETAIL: Creating dependency on merge taskId 40
DEBUG: pruning merge fetch taskId 31
DETAIL: Creating dependency on merge taskId 47
DEBUG: pruning merge fetch taskId 34
DETAIL: Creating dependency on merge taskId 47
DEBUG: pruning merge fetch taskId 37
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 40
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 43
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 46
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 49
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 52
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 55
DETAIL: Creating dependency on merge taskId 68
DEBUG: pruning merge fetch taskId 58
DETAIL: Creating dependency on merge taskId 68
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 21 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 24 to node localhost:57638
DEBUG: propagating assignment from merge task 40 to constrained sql task 15
DEBUG: propagating assignment from merge task 47 to constrained sql task 21
DEBUG: assigned task 27 to node localhost:57638
DEBUG: assigned task 33 to node localhost:57637
DEBUG: assigned task 48 to node localhost:57638
DEBUG: assigned task 39 to node localhost:57637
DEBUG: assigned task 57 to node localhost:57638
DEBUG: propagating assignment from merge task 19 to constrained sql task 6
DEBUG: propagating assignment from merge task 26 to constrained sql task 12
DEBUG: propagating assignment from merge task 26 to constrained sql task 15
DEBUG: propagating assignment from merge task 26 to constrained sql task 18
DEBUG: propagating assignment from merge task 33 to constrained sql task 24
DEBUG: propagating assignment from merge task 40 to constrained sql task 30
DEBUG: propagating assignment from merge task 47 to constrained sql task 36
DEBUG: propagating assignment from merge task 54 to constrained sql task 42
DEBUG: propagating assignment from merge task 54 to constrained sql task 45
DEBUG: propagating assignment from merge task 61 to constrained sql task 51
DEBUG: propagating assignment from merge task 61 to constrained sql task 54
DEBUG: propagating assignment from merge task 68 to constrained sql task 60
DEBUG: CommitTransactionCommand
count
-------
@ -162,6 +214,8 @@ DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 8 to node localhost:57638
DEBUG: assigned task 10 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 14 to node localhost:57637
DEBUG: assigned task 16 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
@ -178,19 +232,19 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 13
DETAIL: Creating dependency on merge taskId 17
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 20
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 34
DETAIL: Creating dependency on merge taskId 44
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 19
DEBUG: assigned task 3 to node localhost:57637


@ -41,8 +41,8 @@ DEBUG: push down of limit count: 600
153937 | 2761321906
199283 | 2726988572
185925 | 2672114100
196629 | 2622637602
157064 | 2614644408
189336 | 2596175232
(10 rows)
-- Disable limit optimization for our second test. This time, we have a query
@ -81,15 +81,15 @@ DEBUG: push down of limit count: 150
c_custkey | c_name | lineitem_count
-----------+--------------------+----------------
43 | Customer#000000043 | 42
370 | Customer#000000370 | 36
370 | Customer#000000370 | 38
79 | Customer#000000079 | 37
689 | Customer#000000689 | 36
472 | Customer#000000472 | 35
685 | Customer#000000685 | 35
643 | Customer#000000643 | 34
226 | Customer#000000226 | 33
496 | Customer#000000496 | 32
685 | Customer#000000685 | 32
304 | Customer#000000304 | 31
472 | Customer#000000472 | 31
79 | Customer#000000079 | 30
145 | Customer#000000145 | 30
(10 rows)
RESET citus.large_table_shard_count;


@ -11,13 +11,13 @@ SET citus.large_table_shard_count TO 2;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
shardminvalue | shardmaxvalue
---------------+---------------
1 | 2496
1 | 1509
(1 row)
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
shardminvalue | shardmaxvalue
---------------+---------------
2497 | 4964
1509 | 2951
(1 row)
-- Check that partition and join pruning works when min/max values exist
@ -25,8 +25,10 @@ SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
1 | 1 | 03-13-1996
@ -45,12 +47,14 @@ DEBUG: predicate pruning for shardId 290005
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [1,2496] and [8997,14946]
DEBUG: join prunable for intervals [2497,4964] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429
@ -62,8 +66,10 @@ UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
@ -76,11 +82,13 @@ DEBUG: predicate pruning for shardId 290005
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [2497,4964] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429
@ -91,8 +99,10 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986]
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
@ -105,10 +115,12 @@ DEBUG: predicate pruning for shardId 290005
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429
@ -120,8 +132,10 @@ UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
@ -134,11 +148,13 @@ DEBUG: predicate pruning for shardId 290005
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [0,2496] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [0,1509] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429


@ -11,8 +11,10 @@ SET client_min_messages TO DEBUG2;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
1 | 1 | 03-13-1996
@ -38,6 +40,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 903
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
sum | avg
-------+--------------------
17999 | 3.0189533713518953
@ -45,7 +48,7 @@ DEBUG: predicate pruning for shardId 290002
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem
WHERE (l_orderkey < 4000 OR l_orderkey > 9030);
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
sum | avg
-------+--------------------
30184 | 3.0159872102318145
@ -59,6 +62,8 @@ DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
sum | avg
-----+-----
|


@ -27,9 +27,19 @@ CREATE TABLE lineitem_range (
l_comment varchar(44) not null );
SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range');
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "500MB";
\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
RESET citus.shard_max_size;
-- Run aggregate(distinct) on partition column for range partitioned table
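With \stage gone, the test creates its range shards explicitly: master_create_empty_shard returns a shard id, \gset captures it in the :new_shard_id variable, and the UPDATE on pg_dist_shard assigns the range bounds before \COPY loads the data. A minimal sketch of the same pattern, using a hypothetical table, bounds, and data file in place of the test's:
-- Hypothetical names; mirrors the lineitem_range setup above.
-- Assumes event_id values in events.1.data fall within the declared bounds.
CREATE TABLE events_range (event_id bigint not null, payload text);
SELECT master_create_distributed_table('events_range', 'event_id', 'range');
SELECT master_create_empty_shard('events_range') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 1000
WHERE shardid = :new_shard_id;
\COPY events_range FROM 'events.1.data' with delimiter '|'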

View File

@ -24,7 +24,7 @@ CREATE TABLE aggregate_type (
interval_value interval not null);
SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append');
\STAGE aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
\COPY aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
-- Test conversions using aggregates on floats and division

View File

@ -29,7 +29,7 @@ CREATE TABLE lineitem_alter (
l_comment varchar(44) not null
);
SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
\STAGE lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- Verify that we can add columns
@ -57,8 +57,8 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1;
ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT;
-- \stage to verify that default values take effect
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \COPY to verify that default values take effect
\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
@ -71,16 +71,17 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
-- Drop default so that NULLs will be inserted for this column
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;
-- \stage should fail because it will try to insert NULLs for a NOT NULL column
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \COPY should fail because it will try to insert NULLs for a NOT NULL column
-- Note that this operation will create a table on the workers, but it won't be in the metadata
\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- Verify that DROP NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
\d lineitem_alter
-- \stage should succeed now
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \COPY should succeed now
\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
SELECT count(*) from lineitem_alter;
-- Verify that SET DATA TYPE works
@ -258,7 +259,8 @@ DROP TABLESPACE super_fast_ssd;
SET citus.enable_ddl_propagation to true;
SELECT master_apply_delete_command('DELETE FROM lineitem_alter');
DROP TABLE lineitem_alter;
-- check that nothing's left over on workers
-- check that nothing's left over on workers, other than the leftover shard created
-- during the unsuccessful COPY
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
\c - - - :master_port

View File

@ -32,12 +32,12 @@ SELECT master_create_worker_shards('multi_append_table_to_shard_right_hash', 1,
-- Replicate 'left' table on both workers
SELECT set_config('citus.shard_replication_factor', '2', false);
\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
-- Place 'right' table only on the primary worker
SELECT set_config('citus.shard_replication_factor', '1', false);
\STAGE multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'
\COPY multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'
-- Reset shard replication factor to ensure tasks will be assigned to both workers
SELECT set_config('citus.shard_replication_factor', '2', false);

View File

@ -11,6 +11,6 @@ CREATE TABLE nation (
n_comment varchar(152));
SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append');
\STAGE tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\COPY tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
SELECT count(*) from tpch.nation;

View File

@ -13,8 +13,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000;
-- Stage additional data to start using large shard identifiers.
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
-- Query #1 from the TPC-H decision support benchmark.

View File

@ -19,9 +19,9 @@ CREATE TABLE customer_delete_protocol (
c_comment varchar(117) not null);
SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append');
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
-- Testing master_apply_delete_command
-- Check that we don't support conditions on columns other than partition key.

View File

@ -53,11 +53,11 @@ FROM
multi_outer_join_left a LEFT JOIN multi_outer_join_third b ON (l_custkey = t_custkey);
-- Left table is a large table
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
-- Right table is a small table
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
-- Make sure we do not crash if one table has no shards
SELECT
@ -71,7 +71,7 @@ FROM
multi_outer_join_third a LEFT JOIN multi_outer_join_right b ON (r_custkey = t_custkey);
-- Third table is a single shard table with all data
\STAGE multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
\COPY multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
-- Regular outer join should return results for all rows
SELECT
@ -150,7 +150,7 @@ FROM
-- Turn the right table into a large table
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
-- Shards do not have 1-1 matching. We should error here.
@ -164,11 +164,11 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left');
SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right');
-- reload shards with 1-1 matching
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
-- multi_outer_join_third is a single shard table
@ -409,7 +409,7 @@ ORDER BY cnt DESC, l1.l_custkey DESC
LIMIT 20;
-- Add a shard to the left table that overlaps with multiple shards in the right
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
-- All outer joins should error out

View File

@ -6,29 +6,16 @@
-- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement
-- policy is left to the default value (round-robin) to test the common install case.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000;
\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\COPY orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\COPY orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\STAGE orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\STAGE orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\STAGE customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\STAGE nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\STAGE part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\STAGE supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
-- check that we error out if we try to stage into a hash partitioned table
CREATE TABLE nation_hash_partitioned (
n_nationkey integer not null,
n_name char(25) not null,
n_regionkey integer not null,
n_comment varchar(152));
SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash');
\STAGE nation_hash_partitioned FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\COPY customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\COPY nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\COPY part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\COPY supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'

View File

@ -15,7 +15,7 @@ SET citus.shard_max_size TO "256kB";
CREATE TABLE large_records_table (data_id integer, data text);
SELECT master_create_distributed_table('large_records_table', 'data_id', 'append');
\STAGE large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
\COPY large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class
WHERE pg_class.oid=logicalrelid AND relname='large_records_table'

View File

@ -11,6 +11,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
-- staging causes the planner to consider customer and part tables as large, and
-- evaluate plans where some of the underlying tables need to be repartitioned.
\STAGE customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\STAGE customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\STAGE part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
\COPY customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\COPY customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\COPY part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'

View File

@ -79,13 +79,33 @@ FROM
-- Stage data to tables.
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "1MB";
\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\COPY orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\COPY orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
-- Check that we error out if shard min/max values are not exactly same.
@ -280,10 +300,13 @@ SELECT max(l_orderkey) FROM
) z
) y;
-- Load more data to one relation, then test if we error out because of different
-- Add one more shard to one relation, then test if we error out because of different
-- shard counts for joining relations.
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 15000, shardmaxvalue = 20000
WHERE shardid = :new_shard_id;
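The old test forced the shard-count mismatch by re-staging orders.1.data; the new version gets the same effect by adding a single empty shard with a non-overlapping range, so orders_subquery ends up with one more shard than lineitem_subquery. Outside the test, a quick way to confirm the mismatch that the next query is expected to error on:
-- Illustrative only: compare shard counts of the two joined relations.
SELECT logicalrelid::regclass AS relation_name, count(*) AS shard_count
FROM pg_dist_shard
WHERE logicalrelid IN ('lineitem_subquery'::regclass, 'orders_subquery'::regclass)
GROUP BY logicalrelid;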
SELECT
avg(unit_price)

View File

@ -27,9 +27,17 @@ SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range');
(1 row)
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "500MB";
\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
RESET citus.shard_max_size;
-- Run aggregate(distinct) on partition column for range partitioned table
SELECT count(distinct l_orderkey) FROM lineitem_range;

View File

@ -41,7 +41,7 @@ SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append'
(1 row)
\STAGE aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
\COPY aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
-- Test conversions using aggregates on floats and division
SELECT min(float_value), max(float_value),
sum(float_value), count(float_value), avg(float_value)

View File

@ -30,7 +30,7 @@ SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append')
(1 row)
\STAGE lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- Verify that we can add columns
ALTER TABLE lineitem_alter ADD COLUMN float_column FLOAT;
NOTICE: using one-phase commit for distributed DDL commands
@ -121,8 +121,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1;
NOTICE: using one-phase commit for distributed DDL commands
HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT;
-- \stage to verify that default values take effect
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \COPY to verify that default values take effect
\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
float_column | count
--------------+-------
@ -167,15 +167,11 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
-- Drop default so that NULLs will be inserted for this column
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;
-- \stage should fail because it will try to insert NULLs for a NOT NULL column
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \COPY should fail because it will try to insert NULLs for a NOT NULL column
-- Note that this operation will create a table on the workers, but it won't be in the metadata
\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
ERROR: null value in column "int_column2" violates not-null constraint
DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..."
ERROR: null value in column "int_column2" violates not-null constraint
DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..."
\stage: failed to replicate shard to enough replicas
DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
-- Verify that DROP NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
\d lineitem_alter
@ -204,8 +200,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
int_column2 | integer |
null_column | integer |
-- \stage should succeed now
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \COPY should succeed now
\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
SELECT count(*) from lineitem_alter;
count
-------
@ -474,7 +470,7 @@ SELECT master_create_worker_shards('test_ab', 8, 2);
INSERT INTO test_ab VALUES (2, 10);
INSERT INTO test_ab VALUES (2, 11);
CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a);
WARNING: could not create unique index "temp_unique_index_1_220016"
WARNING: could not create unique index "temp_unique_index_1_220021"
DETAIL: Key (a)=(2) is duplicated.
CONTEXT: while executing command on localhost:57638
ERROR: could not execute DDL command on worker node shards
@ -605,15 +601,17 @@ SET citus.enable_ddl_propagation to true;
SELECT master_apply_delete_command('DELETE FROM lineitem_alter');
master_apply_delete_command
-----------------------------
9
14
(1 row)
DROP TABLE lineitem_alter;
-- check that nothing's left over on workers
-- check that nothing's left over on workers, other than the leftover shard created
-- during the unsuccessful COPY
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
relname
---------
(0 rows)
-----------------------
lineitem_alter_220009
(1 row)
\c - - - :master_port
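The worker now reports one leftover relation instead of none: as the updated comment notes, an unsuccessful \COPY creates the shard table on the worker without registering it in the metadata, so dropping the distributed table leaves that shard behind. A manual cleanup sketch, not part of the test, with the shard name taken from the expected output above:
-- Manual cleanup on the worker; shard name comes from the output above.
\c - - - :worker_1_port
DROP TABLE IF EXISTS lineitem_alter_220009;
\c - - - :master_port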

View File

@ -50,8 +50,8 @@ SELECT set_config('citus.shard_replication_factor', '2', false);
2
(1 row)
\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
-- Place 'right' table only on the primary worker
SELECT set_config('citus.shard_replication_factor', '1', false);
set_config
@ -59,7 +59,7 @@ SELECT set_config('citus.shard_replication_factor', '1', false);
1
(1 row)
\STAGE multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'
\COPY multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'
-- Reset shard replication factor to ensure tasks will be assigned to both workers
SELECT set_config('citus.shard_replication_factor', '2', false);
set_config

View File

@ -12,7 +12,7 @@ SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append');
(1 row)
\STAGE tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\COPY tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
SELECT count(*) from tpch.nation;
count
-------

View File

@ -7,8 +7,8 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000;
-- Stage additional data to start using large shard identifiers.
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
-- Query #1 from the TPC-H decision support benchmark.
SELECT
l_returnflag,

View File

@ -19,9 +19,9 @@ SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey',
(1 row)
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
-- Testing master_apply_delete_command
-- Check that we don't support conditions on columns other than partition key.
SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol

View File

@ -62,10 +62,10 @@ FROM
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
-- Left table is a large table
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
-- Right table is a small table
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
-- Make sure we do not crash if one table has no shards
SELECT
min(l_custkey), max(l_custkey)
@ -84,7 +84,7 @@ LOG: join order: [ "multi_outer_join_third" ][ broadcast join "multi_outer_join
(1 row)
-- Third table is a single shard table with all data
\STAGE multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
\COPY multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
-- Regular outer join should return results for all rows
SELECT
min(l_custkey), max(l_custkey)
@ -202,7 +202,7 @@ LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_
(1 row)
-- Turn the right table into a large table
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
-- Shards do not have 1-1 matching. We should error here.
SELECT
min(l_custkey), max(l_custkey)
@ -224,10 +224,10 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right');
(1 row)
-- reload shards with 1-1 matching
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
-- multi_outer_join_third is a single shard table
-- Regular left join should work as expected
SELECT
@ -754,7 +754,7 @@ LIMIT 20;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries in outer joins are not supported
-- Add a shard to the left table that overlaps with multiple shards in the right
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
-- All outer joins should error out
SELECT
min(l_custkey), max(l_custkey)

View File

@ -7,25 +7,11 @@
-- policy is left to the default value (round-robin) to test the common install case.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000;
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\STAGE orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\STAGE orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\STAGE customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\STAGE nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\STAGE part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\STAGE supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
-- check that we error out if we try to stage into a hash partitioned table
CREATE TABLE nation_hash_partitioned (
n_nationkey integer not null,
n_name char(25) not null,
n_regionkey integer not null,
n_comment varchar(152));
SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
\STAGE nation_hash_partitioned FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\stage: staging data into hash partitioned tables is not supported
\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\COPY orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\COPY orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\COPY customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\COPY nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\COPY part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\COPY supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'

View File

@ -14,7 +14,7 @@ SELECT master_create_distributed_table('large_records_table', 'data_id', 'append
(1 row)
\STAGE large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
\COPY large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class
WHERE pg_class.oid=logicalrelid AND relname='large_records_table'
ORDER BY shardid;

View File

@ -6,6 +6,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
-- We stage more data to customer and part tables to test distributed joins. The
-- staging causes the planner to consider customer and part tables as large, and
-- evaluate plans where some of the underlying tables need to be repartitioned.
\STAGE customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\STAGE customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\STAGE part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
\COPY customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\COPY customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\COPY part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'

View File

@ -83,11 +83,27 @@ FROM
(1 row)
-- Stage data to tables.
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "1MB";
\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\COPY orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\COPY orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
-- Check that we error out if shard min/max values are not exactly same.
SELECT
avg(unit_price)
@ -310,9 +326,12 @@ SELECT max(l_orderkey) FROM
14947
(1 row)
-- Load more data to one relation, then test if we error out because of different
-- Add one more shard to one relation, then test if we error out because of different
-- shard counts for joining relations.
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 15000, shardmaxvalue = 20000
WHERE shardid = :new_shard_id;
SELECT
avg(unit_price)
FROM