Merge pull request #630 from citusdata/replace_stage_with_copy_in_tests

Replace \stage With \copy in Regression Tests

cr: @jasonmp85
Jason Petersen 2016-08-22 13:41:10 -06:00 committed by GitHub
commit 2c87244ed4
61 changed files with 610 additions and 456 deletions
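The change itself is mechanical: every regression test that loaded data through csql's deprecated \stage command now uses the standard psql \copy meta-command, and the expected outputs shift because the data lands in a different shard layout. A minimal before/after sketch, assuming a pipe-delimited lineitem data file (the file name is illustrative):

    -- before: csql-specific \stage, removed by this commit
    \stage lineitem FROM 'lineitem.tbl' with delimiter '|'
    -- after: standard psql \copy against the distributed table
    \copy lineitem FROM 'lineitem.tbl' with delimiter '|'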


@@ -1,6 +1,6 @@
#!/usr/bin/env bash
echo "WARNING: copy_to_distributed_table is now deprecated." >&2
echo "HINT: You can use \\COPY on distributed tables, which is a lot faster." >&2
echo "HINT: You can use \\copy on distributed tables, which is a lot faster." >&2
# make bash behave
set -euo pipefail
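For anyone still invoking the deprecated wrapper, the hinted replacement runs inside psql; a hedged sketch with hypothetical table and file names:

    \copy my_distributed_table FROM 'data.csv' WITH (FORMAT csv)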


@@ -10,9 +10,9 @@
/multi_large_shardid.out
/multi_master_delete_protocol.out
/multi_outer_join.out
/multi_stage_data.out
/multi_stage_large_records.out
/multi_stage_more_data.out
/multi_load_data.out
/multi_load_large_records.out
/multi_load_more_data.out
/multi_subquery.out
/multi_subquery_0.out
/worker_copy.out


@@ -123,7 +123,7 @@ SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4,
(1 row)
\COPY test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|';
\copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|';
SET search_path TO public;
SET citus.count_distinct_error_rate TO 0.01;
SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash;


@@ -98,10 +98,10 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_agg(l_orderkey) FROM li
GROUP BY l_quantity ORDER BY l_quantity;
l_quantity | count | avg | array_agg
------------+-------+-----------------------+--------------------------------------------------------------------------------------------------
1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476}
2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476}
3.00 | 14 | 4714.0392857142857143 | {5509,5543,5605,5606,5827,9124,9157,9184,9223,9254,9349,9414,9475,9477}
4.00 | 19 | 5929.7136842105263158 | {5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985,9091,9120,9281,9347,9382,9440,9473}
1.00 | 17 | 1477.1258823529411765 | {8997,9026,9158,9184,9220,9222,9348,9383,9476,5543,5633,5634,5698,5766,5856,5857,5986}
2.00 | 19 | 3078.4242105263157895 | {9030,9058,9123,9124,9188,9344,9441,9476,5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923}
3.00 | 14 | 4714.0392857142857143 | {9124,9157,9184,9223,9254,9349,9414,9475,9477,5509,5543,5605,5606,5827}
4.00 | 19 | 5929.7136842105263158 | {9091,9120,9281,9347,9382,9440,9473,5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985}
(4 rows)
SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month
@@ -109,10 +109,10 @@ SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month
AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
l_quantity | my_month
------------+------------------------------------------------
1.00 | {9,5,7,5,9,11,11,4,7,7,4,7,4,2,6,3,5}
2.00 | {11,10,8,5,5,12,3,11,7,11,5,7,6,6,10,1,12,6,5}
3.00 | {4,9,8,11,7,10,6,7,8,5,8,9,11,3}
4.00 | {1,5,6,11,12,10,9,6,1,2,5,1,11,6,2,8,2,6,10}
1.00 | {7,7,4,7,4,2,6,3,5,9,5,7,5,9,11,11,4}
2.00 | {7,6,6,10,1,12,6,5,11,10,8,5,5,12,3,11,7,11,5}
3.00 | {10,6,7,8,5,8,9,11,3,4,9,8,11,7}
4.00 | {11,6,2,8,2,6,10,1,5,6,11,12,10,9,6,1,2,5,1}
(4 rows)
SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity < 5
@@ -120,10 +120,10 @@ SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity
AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
l_quantity | array_agg
------------+---------------------------------------------
1.00 | {11269,11397,11713,11715,11973,18317,18445}
2.00 | {11847,18061,18247,18953}
1.00 | {18317,18445,11269,11397,11713,11715,11973}
2.00 | {18061,18247,18953,11847}
3.00 | {18249,18315,18699,18951,18955}
4.00 | {11653,11659,18241,18765}
4.00 | {18241,18765,11653,11659}
(4 rows)
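The reshuffled array_agg contents above are expected: with \copy the data spans eight shards instead of six, tasks complete in a different order, and array_agg without an inner ORDER BY makes no ordering guarantee. Where a deterministic result matters, the standard remedy is an ordered aggregate, e.g.:

    SELECT l_quantity, array_agg(l_orderkey ORDER BY l_orderkey)
    FROM lineitem
    WHERE l_quantity < 5
    GROUP BY l_quantity
    ORDER BY l_quantity;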
-- Check that we can execute array_agg() with an expression containing NULL values


@@ -390,7 +390,7 @@ ORDER BY
customer_keys.o_custkey DESC
LIMIT 10 OFFSET 20;
DEBUG: push down of limit count: 30
DEBUG: building index "pg_toast_16992_index" on table "pg_toast_16992"
DEBUG: building index "pg_toast_17021_index" on table "pg_toast_17021"
o_custkey | total_order_count
-----------+-------------------
1466 | 1


@@ -34,8 +34,8 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Distributed Query into pg_merge_job_570000
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> HashAggregate
@@ -55,8 +55,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
{
"Executor": "Real-Time",
"Job": {
"Task Count": 6,
"Tasks Shown": "One of 6",
"Task Count": 8,
"Tasks Shown": "One of 8",
"Tasks": [
{
"Node": "host=localhost port=57637 dbname=regression",
@@ -122,8 +122,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Distributed-Query>
<Executor>Real-Time</Executor>
<Job>
<Task-Count>6</Task-Count>
<Tasks-Shown>One of 6</Tasks-Shown>
<Task-Count>8</Task-Count>
<Tasks-Shown>One of 8</Tasks-Shown>
<Tasks>
<Task>
<Node>host=localhost port=57637 dbname=regression</Node>
@@ -193,8 +193,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
- Executor: "Real-Time"
Job:
Task Count: 6
Tasks Shown: "One of 6"
Task Count: 8
Tasks Shown: "One of 8"
Tasks:
- Node: "host=localhost port=57637 dbname=regression"
Remote Plan:
@@ -232,8 +232,8 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Distributed Query into pg_merge_job_570006
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> HashAggregate
@@ -250,8 +250,8 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
Distributed Query into pg_merge_job_570007
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
@@ -270,8 +270,8 @@ EXPLAIN (COSTS FALSE)
ORDER BY l_quantity LIMIT 10;
Distributed Query into pg_merge_job_570008
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> Limit
@@ -282,7 +282,7 @@ Distributed Query into pg_merge_job_570008
-> Seq Scan on lineitem_290000 lineitem
Filter: (l_quantity < 5.0)
-> Hash
-> Seq Scan on orders_290006 orders
-> Seq Scan on orders_290008 orders
Master Query
-> Limit
-> Sort
@@ -357,8 +357,8 @@ EXPLAIN (COSTS FALSE)
SELECT * FROM lineitem;
Distributed Query into pg_merge_job_570012
Executor: Real-Time
Task Count: 6
Tasks Shown: One of 6
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
-> Seq Scan on lineitem_290000 lineitem
@@ -370,7 +370,7 @@ EXPLAIN (COSTS FALSE)
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
Distributed Query into pg_merge_job_570013
Executor: Real-Time
Task Count: 3
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
@@ -380,12 +380,17 @@ Distributed Query into pg_merge_job_570013
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290003 lineitem
-> Seq Scan on lineitem_290005 lineitem
Filter: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290005 lineitem
-> Seq Scan on lineitem_290006 lineitem
Filter: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290007 lineitem
Filter: (l_orderkey > 9030)
Master Query
-> Aggregate
@@ -403,8 +408,8 @@ EXPLAIN (COSTS FALSE)
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
Distributed Query into pg_merge_job_570016
Executor: Task-Tracker
Task Count: 3
Tasks Shown: One of 3
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
@@ -429,7 +434,7 @@ Distributed Query into pg_merge_job_570019
Map Task Count: 1
Merge Task Count: 1
-> MapMergeJob
Map Task Count: 6
Map Task Count: 8
Merge Task Count: 1
Master Query
-> Aggregate
@@ -452,7 +457,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Merge Task Count": 1,
"Depended Jobs": [
{
"Map Task Count": 6,
"Map Task Count": 8,
"Merge Task Count": 1
}
]
@@ -502,7 +507,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Merge-Task-Count>1</Merge-Task-Count>
<Depended-Jobs>
<MapMergeJob>
<Map-Task-Count>6</Map-Task-Count>
<Map-Task-Count>8</Map-Task-Count>
<Merge-Task-Count>1</Merge-Task-Count>
</MapMergeJob>
</Depended-Jobs>
@@ -548,7 +553,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Map Task Count: 1
Merge Task Count: 1
Depended Jobs:
- Map Task Count: 6
- Map Task Count: 8
Merge Task Count: 1
Master Query:
- Plan:
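The task-count changes throughout this file track the shard count: loading lineitem via \copy produces eight shards where the \stage flow created six. A quick sanity check from psql (illustrative, not part of the test suite):

    SELECT count(*) FROM pg_dist_shard
    WHERE logicalrelid = 'lineitem'::regclass;
    -- 8 after this change; 6 before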


@@ -5,7 +5,7 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
-- Create a table partitioned on integer column and update partition type to
-- hash. Then stage data to this table and update shard min max values with
-- hash. Then load data into this table and update shard min max values with
-- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
-- 1134484726, -28094569 and -1011077333.
CREATE TABLE orders_hash_partitioned (
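The hash values cited in this comment are reproducible with PostgreSQL's built-in int4 hash function, which is what Citus applies to integer partition columns (a sketch, assuming hashint4 is the operative function):

    SELECT hashint4(1), hashint4(2), hashint4(3), hashint4(4);
    -- -1905060026 | 1134484726 | -28094569 | -1011077333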


@@ -5,7 +5,7 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
-- Create a table partitioned on integer column and update partition type to
-- hash. Then stage data to this table and update shard min max values with
-- hash. Then load data into this table and update shard min max values with
-- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
-- 1134484726, -28094569 and -1011077333.
CREATE TABLE orders_hash_partitioned (


@@ -99,34 +99,50 @@ SELECT master_create_worker_shards('customer_hash', 2, 1);
EXPLAIN SELECT l1.l_quantity FROM lineitem l1, lineitem l2
WHERE l1.l_orderkey = l2.l_orderkey AND l1.l_quantity > 5;
LOG: join order: [ "lineitem" ][ local partition join "lineitem" ]
DEBUG: join prunable for intervals [1,2496] and [2497,4964]
DEBUG: join prunable for intervals [1,2496] and [4965,5986]
DEBUG: join prunable for intervals [1,2496] and [8997,11554]
DEBUG: join prunable for intervals [1,2496] and [11554,13920]
DEBUG: join prunable for intervals [1,2496] and [13921,14947]
DEBUG: join prunable for intervals [2497,4964] and [1,2496]
DEBUG: join prunable for intervals [2497,4964] and [4965,5986]
DEBUG: join prunable for intervals [2497,4964] and [8997,11554]
DEBUG: join prunable for intervals [2497,4964] and [11554,13920]
DEBUG: join prunable for intervals [2497,4964] and [13921,14947]
DEBUG: join prunable for intervals [4965,5986] and [1,2496]
DEBUG: join prunable for intervals [4965,5986] and [2497,4964]
DEBUG: join prunable for intervals [4965,5986] and [8997,11554]
DEBUG: join prunable for intervals [4965,5986] and [11554,13920]
DEBUG: join prunable for intervals [4965,5986] and [13921,14947]
DEBUG: join prunable for intervals [8997,11554] and [1,2496]
DEBUG: join prunable for intervals [8997,11554] and [2497,4964]
DEBUG: join prunable for intervals [8997,11554] and [4965,5986]
DEBUG: join prunable for intervals [8997,11554] and [13921,14947]
DEBUG: join prunable for intervals [11554,13920] and [1,2496]
DEBUG: join prunable for intervals [11554,13920] and [2497,4964]
DEBUG: join prunable for intervals [11554,13920] and [4965,5986]
DEBUG: join prunable for intervals [11554,13920] and [13921,14947]
DEBUG: join prunable for intervals [13921,14947] and [1,2496]
DEBUG: join prunable for intervals [13921,14947] and [2497,4964]
DEBUG: join prunable for intervals [13921,14947] and [4965,5986]
DEBUG: join prunable for intervals [13921,14947] and [8997,11554]
DEBUG: join prunable for intervals [13921,14947] and [11554,13920]
DEBUG: join prunable for intervals [1,1509] and [2951,4455]
DEBUG: join prunable for intervals [1,1509] and [4480,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,10560]
DEBUG: join prunable for intervals [1,1509] and [10560,12036]
DEBUG: join prunable for intervals [1,1509] and [12036,13473]
DEBUG: join prunable for intervals [1,1509] and [13473,14947]
DEBUG: join prunable for intervals [1509,4964] and [8997,10560]
DEBUG: join prunable for intervals [1509,4964] and [10560,12036]
DEBUG: join prunable for intervals [1509,4964] and [12036,13473]
DEBUG: join prunable for intervals [1509,4964] and [13473,14947]
DEBUG: join prunable for intervals [2951,4455] and [1,1509]
DEBUG: join prunable for intervals [2951,4455] and [4480,5986]
DEBUG: join prunable for intervals [2951,4455] and [8997,10560]
DEBUG: join prunable for intervals [2951,4455] and [10560,12036]
DEBUG: join prunable for intervals [2951,4455] and [12036,13473]
DEBUG: join prunable for intervals [2951,4455] and [13473,14947]
DEBUG: join prunable for intervals [4480,5986] and [1,1509]
DEBUG: join prunable for intervals [4480,5986] and [2951,4455]
DEBUG: join prunable for intervals [4480,5986] and [8997,10560]
DEBUG: join prunable for intervals [4480,5986] and [10560,12036]
DEBUG: join prunable for intervals [4480,5986] and [12036,13473]
DEBUG: join prunable for intervals [4480,5986] and [13473,14947]
DEBUG: join prunable for intervals [8997,10560] and [1,1509]
DEBUG: join prunable for intervals [8997,10560] and [1509,4964]
DEBUG: join prunable for intervals [8997,10560] and [2951,4455]
DEBUG: join prunable for intervals [8997,10560] and [4480,5986]
DEBUG: join prunable for intervals [8997,10560] and [12036,13473]
DEBUG: join prunable for intervals [8997,10560] and [13473,14947]
DEBUG: join prunable for intervals [10560,12036] and [1,1509]
DEBUG: join prunable for intervals [10560,12036] and [1509,4964]
DEBUG: join prunable for intervals [10560,12036] and [2951,4455]
DEBUG: join prunable for intervals [10560,12036] and [4480,5986]
DEBUG: join prunable for intervals [10560,12036] and [13473,14947]
DEBUG: join prunable for intervals [12036,13473] and [1,1509]
DEBUG: join prunable for intervals [12036,13473] and [1509,4964]
DEBUG: join prunable for intervals [12036,13473] and [2951,4455]
DEBUG: join prunable for intervals [12036,13473] and [4480,5986]
DEBUG: join prunable for intervals [12036,13473] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [1,1509]
DEBUG: join prunable for intervals [13473,14947] and [1509,4964]
DEBUG: join prunable for intervals [13473,14947] and [2951,4455]
DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
QUERY PLAN
------------------------------------------------------------
explain statements for distributed queries are not enabled


@@ -9,7 +9,7 @@ SET citus.log_multi_join_order TO TRUE;
SET client_min_messages TO LOG;
-- Change configuration to treat lineitem, orders, customer, and part tables as
-- large. The following queries are basically the same as the ones in tpch_small
-- except that more data has been staged to customer and part tables. Therefore,
-- except that more data has been loaded into customer and part tables. Therefore,
-- we will apply different distributed join strategies for these queries.
SET citus.large_table_shard_count TO 2;
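Taken together, these settings force the planner down its large-table join paths and surface the chosen join order in the logs; a sketch of the session setup, assuming the GUC treats any table with at least that many shards as large:

    SET citus.large_table_shard_count TO 2;  -- tables with >= 2 shards count as large
    SET citus.log_multi_join_order TO true;  -- log the join order the planner picks
    SET client_min_messages TO LOG;          -- make those log lines visible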
-- Query #6 from the TPC-H decision support benchmark


@@ -11,12 +11,14 @@ SET client_min_messages TO DEBUG2;
SET citus.large_table_shard_count TO 2;
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [1,2496] and [8997,14946]
DEBUG: join prunable for intervals [2497,4964] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429
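Each "join prunable" line is a simple interval-overlap test on shard min/max values; a pair of shards joins only if their ranges can intersect. The same check, written with range types purely for illustration:

    SELECT int4range(1, 1509, '[]') && int4range(8997, 14946, '[]');
    -- f: the intervals are disjoint, so this shard pair is pruned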
@@ -27,9 +29,11 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: predicate pruning for shardId 290003
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
17996 | 3.0194630872483221
@@ -45,6 +49,8 @@ DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
sum | avg
-----+-----
|
@@ -58,10 +64,12 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290007
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290009
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-----+-----
|


@@ -45,40 +45,48 @@ GROUP BY
ORDER BY
l_partkey, o_orderkey;
DEBUG: StartTransactionCommand
DEBUG: join prunable for intervals [1,2496] and [8997,14946]
DEBUG: join prunable for intervals [2497,4964] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
DEBUG: join prunable for intervals [1509,4964] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
DEBUG: generated sql query for job 1250 and task 3
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 6
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 9
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 12
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 15
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 18
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 21
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 24
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 15 to node localhost:57637
DEBUG: assigned task 18 to node localhost:57638
DEBUG: assigned task 21 to node localhost:57637
DEBUG: assigned task 24 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for job 1251 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_290010 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000025".intermediate_column_1250_0, "pg_merge_job_1250.task_000025".intermediate_column_1250_1, "pg_merge_job_1250.task_000025".intermediate_column_1250_2, "pg_merge_job_1250.task_000025".intermediate_column_1250_3, "pg_merge_job_1250.task_000025".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000025 "pg_merge_job_1250.task_000025" JOIN part_290012 part ON (("pg_merge_job_1250.task_000025".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for job 1251 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_280002 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000034".intermediate_column_1250_0, "pg_merge_job_1250.task_000034".intermediate_column_1250_1, "pg_merge_job_1250.task_000034".intermediate_column_1250_2, "pg_merge_job_1250.task_000034".intermediate_column_1250_3, "pg_merge_job_1250.task_000034".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000034 "pg_merge_job_1250.task_000034" JOIN part_280002 part ON (("pg_merge_job_1250.task_000034".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 34
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
@@ -88,7 +96,7 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for job 1252 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290008 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1"
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1"
DEBUG: generated sql query for job 1252 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1"
DEBUG: generated sql query for job 1252 and task 9
@@ -166,16 +174,22 @@ DEBUG: generated sql query for job 1253 and task 10
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 12
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 14
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 16
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 8 to node localhost:57638
DEBUG: assigned task 10 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 14 to node localhost:57637
DEBUG: assigned task 16 to node localhost:57638
DEBUG: generated sql query for job 1254 and task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290006 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
DEBUG: generated sql query for job 1254 and task 4
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290007 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: join prunable for task partitionId 0 and 1
@@ -191,27 +205,27 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for job 1255 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000013".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000005".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000013 "pg_merge_job_1253.task_000013" JOIN pg_merge_job_1254.task_000005 "pg_merge_job_1254.task_000005" ON (("pg_merge_job_1253.task_000013".intermediate_column_1253_1 = "pg_merge_job_1254.task_000005".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000013".intermediate_column_1253_0, "pg_merge_job_1254.task_000005".intermediate_column_1254_0"
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000017".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000005".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000017 "pg_merge_job_1253.task_000017" JOIN pg_merge_job_1254.task_000005 "pg_merge_job_1254.task_000005" ON (("pg_merge_job_1253.task_000017".intermediate_column_1253_1 = "pg_merge_job_1254.task_000005".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000017".intermediate_column_1253_0, "pg_merge_job_1254.task_000005".intermediate_column_1254_0"
DEBUG: generated sql query for job 1255 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000020".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000008".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000020 "pg_merge_job_1253.task_000020" JOIN pg_merge_job_1254.task_000008 "pg_merge_job_1254.task_000008" ON (("pg_merge_job_1253.task_000020".intermediate_column_1253_1 = "pg_merge_job_1254.task_000008".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000020".intermediate_column_1253_0, "pg_merge_job_1254.task_000008".intermediate_column_1254_0"
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000026".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000008".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000026 "pg_merge_job_1253.task_000026" JOIN pg_merge_job_1254.task_000008 "pg_merge_job_1254.task_000008" ON (("pg_merge_job_1253.task_000026".intermediate_column_1253_1 = "pg_merge_job_1254.task_000008".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000026".intermediate_column_1253_0, "pg_merge_job_1254.task_000008".intermediate_column_1254_0"
DEBUG: generated sql query for job 1255 and task 9
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000027".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000011".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000027 "pg_merge_job_1253.task_000027" JOIN pg_merge_job_1254.task_000011 "pg_merge_job_1254.task_000011" ON (("pg_merge_job_1253.task_000027".intermediate_column_1253_1 = "pg_merge_job_1254.task_000011".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000027".intermediate_column_1253_0, "pg_merge_job_1254.task_000011".intermediate_column_1254_0"
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000035".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000011".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000035 "pg_merge_job_1253.task_000035" JOIN pg_merge_job_1254.task_000011 "pg_merge_job_1254.task_000011" ON (("pg_merge_job_1253.task_000035".intermediate_column_1253_1 = "pg_merge_job_1254.task_000011".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000035".intermediate_column_1253_0, "pg_merge_job_1254.task_000011".intermediate_column_1254_0"
DEBUG: generated sql query for job 1255 and task 12
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000034".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000014".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000034 "pg_merge_job_1253.task_000034" JOIN pg_merge_job_1254.task_000014 "pg_merge_job_1254.task_000014" ON (("pg_merge_job_1253.task_000034".intermediate_column_1253_1 = "pg_merge_job_1254.task_000014".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000034".intermediate_column_1253_0, "pg_merge_job_1254.task_000014".intermediate_column_1254_0"
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000044".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000014".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000044 "pg_merge_job_1253.task_000044" JOIN pg_merge_job_1254.task_000014 "pg_merge_job_1254.task_000014" ON (("pg_merge_job_1253.task_000044".intermediate_column_1253_1 = "pg_merge_job_1254.task_000014".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000044".intermediate_column_1253_0, "pg_merge_job_1254.task_000014".intermediate_column_1254_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 13
DETAIL: Creating dependency on merge taskId 17
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 20
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 11
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 34
DETAIL: Creating dependency on merge taskId 44
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DEBUG: assigned task 3 to node localhost:57638


@@ -42,8 +42,8 @@ FROM
WHERE
o_custkey = c_custkey AND
o_orderkey < 0;
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
DEBUG: predicate pruning for shardId 290008
DEBUG: predicate pruning for shardId 290009
count
-------
@@ -58,7 +58,7 @@ FROM
WHERE
o_custkey = c_custkey AND
c_custkey < 0;
DEBUG: predicate pruning for shardId 290008
DEBUG: predicate pruning for shardId 290010
DEBUG: predicate pruning for shardId 280001
DEBUG: predicate pruning for shardId 280000
count
@@ -88,19 +88,19 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 13
DETAIL: Creating dependency on merge taskId 17
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 20
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 34
DETAIL: Creating dependency on merge taskId 44
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 19
count
@@ -123,6 +123,8 @@ DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
count
-------


@@ -72,58 +72,110 @@ DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: join prunable for intervals [1,2496] and [2497,4964]
DEBUG: join prunable for intervals [1,2496] and [4965,5986]
DEBUG: join prunable for intervals [1,2496] and [8997,11554]
DEBUG: join prunable for intervals [1,2496] and [11554,13920]
DEBUG: join prunable for intervals [1,2496] and [13921,14947]
DEBUG: join prunable for intervals [2497,4964] and [1,2496]
DEBUG: join prunable for intervals [2497,4964] and [4965,5986]
DEBUG: join prunable for intervals [2497,4964] and [8997,11554]
DEBUG: join prunable for intervals [2497,4964] and [11554,13920]
DEBUG: join prunable for intervals [2497,4964] and [13921,14947]
DEBUG: join prunable for intervals [4965,5986] and [1,2496]
DEBUG: join prunable for intervals [4965,5986] and [2497,4964]
DEBUG: join prunable for intervals [4965,5986] and [8997,11554]
DEBUG: join prunable for intervals [4965,5986] and [11554,13920]
DEBUG: join prunable for intervals [4965,5986] and [13921,14947]
DEBUG: join prunable for intervals [8997,11554] and [1,2496]
DEBUG: join prunable for intervals [8997,11554] and [2497,4964]
DEBUG: join prunable for intervals [8997,11554] and [4965,5986]
DEBUG: join prunable for intervals [8997,11554] and [13921,14947]
DEBUG: join prunable for intervals [11554,13920] and [1,2496]
DEBUG: join prunable for intervals [11554,13920] and [2497,4964]
DEBUG: join prunable for intervals [11554,13920] and [4965,5986]
DEBUG: join prunable for intervals [11554,13920] and [13921,14947]
DEBUG: join prunable for intervals [13921,14947] and [1,2496]
DEBUG: join prunable for intervals [13921,14947] and [2497,4964]
DEBUG: join prunable for intervals [13921,14947] and [4965,5986]
DEBUG: join prunable for intervals [13921,14947] and [8997,11554]
DEBUG: join prunable for intervals [13921,14947] and [11554,13920]
DEBUG: join prunable for intervals [1,1509] and [2951,4455]
DEBUG: join prunable for intervals [1,1509] and [4480,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,10560]
DEBUG: join prunable for intervals [1,1509] and [10560,12036]
DEBUG: join prunable for intervals [1,1509] and [12036,13473]
DEBUG: join prunable for intervals [1,1509] and [13473,14947]
DEBUG: join prunable for intervals [1509,4964] and [8997,10560]
DEBUG: join prunable for intervals [1509,4964] and [10560,12036]
DEBUG: join prunable for intervals [1509,4964] and [12036,13473]
DEBUG: join prunable for intervals [1509,4964] and [13473,14947]
DEBUG: join prunable for intervals [2951,4455] and [1,1509]
DEBUG: join prunable for intervals [2951,4455] and [4480,5986]
DEBUG: join prunable for intervals [2951,4455] and [8997,10560]
DEBUG: join prunable for intervals [2951,4455] and [10560,12036]
DEBUG: join prunable for intervals [2951,4455] and [12036,13473]
DEBUG: join prunable for intervals [2951,4455] and [13473,14947]
DEBUG: join prunable for intervals [4480,5986] and [1,1509]
DEBUG: join prunable for intervals [4480,5986] and [2951,4455]
DEBUG: join prunable for intervals [4480,5986] and [8997,10560]
DEBUG: join prunable for intervals [4480,5986] and [10560,12036]
DEBUG: join prunable for intervals [4480,5986] and [12036,13473]
DEBUG: join prunable for intervals [4480,5986] and [13473,14947]
DEBUG: join prunable for intervals [8997,10560] and [1,1509]
DEBUG: join prunable for intervals [8997,10560] and [1509,4964]
DEBUG: join prunable for intervals [8997,10560] and [2951,4455]
DEBUG: join prunable for intervals [8997,10560] and [4480,5986]
DEBUG: join prunable for intervals [8997,10560] and [12036,13473]
DEBUG: join prunable for intervals [8997,10560] and [13473,14947]
DEBUG: join prunable for intervals [10560,12036] and [1,1509]
DEBUG: join prunable for intervals [10560,12036] and [1509,4964]
DEBUG: join prunable for intervals [10560,12036] and [2951,4455]
DEBUG: join prunable for intervals [10560,12036] and [4480,5986]
DEBUG: join prunable for intervals [10560,12036] and [13473,14947]
DEBUG: join prunable for intervals [12036,13473] and [1,1509]
DEBUG: join prunable for intervals [12036,13473] and [1509,4964]
DEBUG: join prunable for intervals [12036,13473] and [2951,4455]
DEBUG: join prunable for intervals [12036,13473] and [4480,5986]
DEBUG: join prunable for intervals [12036,13473] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [1,1509]
DEBUG: join prunable for intervals [13473,14947] and [1509,4964]
DEBUG: join prunable for intervals [13473,14947] and [2951,4455]
DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 33
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 40
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 40
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 47
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 19
DETAIL: Creating dependency on merge taskId 47
DETAIL: Creating dependency on merge taskId 33
DEBUG: pruning merge fetch taskId 22
DETAIL: Creating dependency on merge taskId 33
DEBUG: pruning merge fetch taskId 25
DETAIL: Creating dependency on merge taskId 40
DEBUG: pruning merge fetch taskId 28
DETAIL: Creating dependency on merge taskId 40
DEBUG: pruning merge fetch taskId 31
DETAIL: Creating dependency on merge taskId 47
DEBUG: pruning merge fetch taskId 34
DETAIL: Creating dependency on merge taskId 47
DEBUG: pruning merge fetch taskId 37
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 40
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 43
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 46
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 49
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 52
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 55
DETAIL: Creating dependency on merge taskId 68
DEBUG: pruning merge fetch taskId 58
DETAIL: Creating dependency on merge taskId 68
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 21 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 24 to node localhost:57638
DEBUG: propagating assignment from merge task 40 to constrained sql task 15
DEBUG: propagating assignment from merge task 47 to constrained sql task 21
DEBUG: assigned task 27 to node localhost:57638
DEBUG: assigned task 33 to node localhost:57637
DEBUG: assigned task 48 to node localhost:57638
DEBUG: assigned task 39 to node localhost:57637
DEBUG: assigned task 57 to node localhost:57638
DEBUG: propagating assignment from merge task 19 to constrained sql task 6
DEBUG: propagating assignment from merge task 26 to constrained sql task 12
DEBUG: propagating assignment from merge task 26 to constrained sql task 15
DEBUG: propagating assignment from merge task 26 to constrained sql task 18
DEBUG: propagating assignment from merge task 33 to constrained sql task 24
DEBUG: propagating assignment from merge task 40 to constrained sql task 30
DEBUG: propagating assignment from merge task 47 to constrained sql task 36
DEBUG: propagating assignment from merge task 54 to constrained sql task 42
DEBUG: propagating assignment from merge task 54 to constrained sql task 45
DEBUG: propagating assignment from merge task 61 to constrained sql task 51
DEBUG: propagating assignment from merge task 61 to constrained sql task 54
DEBUG: propagating assignment from merge task 68 to constrained sql task 60
DEBUG: CommitTransactionCommand
count
-------
@@ -162,6 +214,8 @@ DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 8 to node localhost:57638
DEBUG: assigned task 10 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 14 to node localhost:57637
DEBUG: assigned task 16 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
@@ -178,19 +232,19 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 13
DETAIL: Creating dependency on merge taskId 17
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 20
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 27
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 34
DETAIL: Creating dependency on merge taskId 44
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 19
DEBUG: assigned task 3 to node localhost:57637


@@ -41,8 +41,8 @@ DEBUG: push down of limit count: 600
153937 | 2761321906
199283 | 2726988572
185925 | 2672114100
196629 | 2622637602
157064 | 2614644408
189336 | 2596175232
(10 rows)
-- Disable limit optimization for our second test. This time, we have a query
@@ -81,15 +81,15 @@ DEBUG: push down of limit count: 150
c_custkey | c_name | lineitem_count
-----------+--------------------+----------------
43 | Customer#000000043 | 42
370 | Customer#000000370 | 36
370 | Customer#000000370 | 38
79 | Customer#000000079 | 37
689 | Customer#000000689 | 36
472 | Customer#000000472 | 35
685 | Customer#000000685 | 35
643 | Customer#000000643 | 34
226 | Customer#000000226 | 33
496 | Customer#000000496 | 32
685 | Customer#000000685 | 32
304 | Customer#000000304 | 31
472 | Customer#000000472 | 31
79 | Customer#000000079 | 30
145 | Customer#000000145 | 30
(10 rows)
RESET citus.large_table_shard_count;


@@ -11,13 +11,13 @@ SET citus.large_table_shard_count TO 2;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
shardminvalue | shardmaxvalue
---------------+---------------
1 | 2496
1 | 1509
(1 row)
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
shardminvalue | shardmaxvalue
---------------+---------------
2497 | 4964
1509 | 2951
(1 row)
-- Check that partition and join pruning works when min/max values exist
@@ -25,8 +25,10 @@ SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
1 | 1 | 03-13-1996
@@ -45,12 +47,14 @@ DEBUG: predicate pruning for shardId 290005
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [1,2496] and [8997,14946]
DEBUG: join prunable for intervals [2497,4964] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,14946]
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429
@@ -62,8 +66,10 @@ UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
@@ -76,11 +82,13 @@ DEBUG: predicate pruning for shardId 290005
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [2497,4964] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [1509,2951] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429
@@ -91,8 +99,10 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986]
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
@@ -105,10 +115,12 @@ DEBUG: predicate pruning for shardId 290005
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429
@@ -120,8 +132,10 @@ UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
@@ -134,11 +148,13 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986]
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
DEBUG: join prunable for intervals [0,2496] and [8997,14946]
DEBUG: join prunable for intervals [4965,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: join prunable for intervals [0,1509] and [8997,14946]
DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
sum | avg
-------+--------------------
36086 | 3.0076679446574429

View File

@ -11,8 +11,10 @@ SET client_min_messages TO DEBUG2;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
1 | 1 | 03-13-1996
@ -38,6 +40,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 903
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
sum | avg
-------+--------------------
17999 | 3.0189533713518953
@ -45,7 +48,7 @@ DEBUG: predicate pruning for shardId 290002
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem
WHERE (l_orderkey < 4000 OR l_orderkey > 9030);
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
sum | avg
-------+--------------------
30184 | 3.0159872102318145
@ -59,6 +62,8 @@ DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
sum | avg
-----+-----
|

View File

@ -13,7 +13,7 @@ CREATE TABLE public.nation_local(
n_regionkey integer not null,
n_comment varchar(152)
);
\COPY public.nation_local FROM STDIN with delimiter '|';
\copy public.nation_local FROM STDIN with delimiter '|';
CREATE TABLE test_schema_support.nation_append(
n_nationkey integer not null,
n_name char(25) not null,
@ -122,7 +122,7 @@ SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey
(1 row)
\COPY nation_append_search_path FROM STDIN with delimiter '|';
\copy nation_append_search_path FROM STDIN with delimiter '|';
-- create shard with master_create_worker_shards
CREATE TABLE test_schema_support.nation_hash(
n_nationkey integer not null,
@ -192,7 +192,7 @@ SELECT * FROM nation_hash WHERE n_nationkey = 7;
-- test UDFs with schemas
SET search_path TO public;
\COPY test_schema_support.nation_hash FROM STDIN with delimiter '|';
\copy test_schema_support.nation_hash FROM STDIN with delimiter '|';
-- create UDF in master node
CREATE OR REPLACE FUNCTION dummyFunction(theValue integer)
RETURNS text AS
@ -446,7 +446,7 @@ SELECT master_create_worker_shards('test_schema_support.nation_hash_collation',
(1 row)
\COPY test_schema_support.nation_hash_collation FROM STDIN with delimiter '|';
\copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|';
SELECT * FROM test_schema_support.nation_hash_collation;
n_nationkey | n_name | n_regionkey | n_comment
-------------+---------------------------+-------------+-------------------------------------------------------------------------------------------------------------
@ -489,7 +489,7 @@ SELECT master_create_worker_shards('nation_hash_collation_search_path', 4, 2);
(1 row)
\COPY nation_hash_collation_search_path FROM STDIN with delimiter '|';
\copy nation_hash_collation_search_path FROM STDIN with delimiter '|';
SELECT * FROM nation_hash_collation_search_path;
n_nationkey | n_name | n_regionkey | n_comment
-------------+---------------------------+-------------+-------------------------------------------------------------------------------------------------------------
@ -542,7 +542,7 @@ SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_ty
(1 row)
-- insert some data to verify composite type queries
\COPY test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|';
\copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|';
SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = '(a,a)'::test_schema_support.new_composite_type;
n_nationkey | n_name | n_regionkey | n_comment | test_col
-------------+---------------------------+-------------+----------------------------------------------------+----------
@ -829,7 +829,7 @@ SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_appen
\c - - - :master_port
-- test with search_path is set
SET search_path TO test_schema_support;
\COPY nation_append FROM STDIN with delimiter '|';
\copy nation_append FROM STDIN with delimiter '|';
SELECT master_apply_delete_command('DELETE FROM nation_append') ;
master_apply_delete_command
-----------------------------
@ -873,7 +873,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4,
(1 row)
\COPY test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|';
\copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|';
SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey', 'hash');
master_create_distributed_table
---------------------------------
@ -886,7 +886,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4
(1 row)
\COPY test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|';
\copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|';
SELECT master_create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey', 'hash');
master_create_distributed_table
---------------------------------
@ -899,7 +899,7 @@ SELECT master_create_worker_shards('test_schema_support_join_2.nation_hash', 4,
(1 row)
\COPY test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|';
\copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|';
-- check when search_path is public,
-- join of two tables which are in different schemas,
-- join on partition column

View File

@ -3,7 +3,7 @@
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000;
-- Create a new hash partitioned multi_shard_modify_test table and stage data into it.
-- Create a new hash partitioned multi_shard_modify_test table and load data into it.
CREATE TABLE multi_shard_modify_test (
t_key integer not null,
t_name varchar(25) not null,

View File

@ -7,7 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000;
-- Create a new range partitioned lineitem table and stage data into it
-- Create a new range partitioned lineitem table and load data into it
CREATE TABLE lineitem_range (
l_orderkey bigint not null,
l_partkey integer not null,
@ -27,9 +27,19 @@ CREATE TABLE lineitem_range (
l_comment varchar(44) not null );
SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range');
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
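-- Editor's note: \gset stores the single-row result of the preceding query
-- into psql variables named after the output columns (here new_shard_id),
-- which the UPDATE statements then reference as :new_shard_id.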
SET citus.shard_max_size TO "500MB";
\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
RESET citus.shard_max_size;
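-- Editor's sketch (illustrative only, not part of the test): the manually
-- assigned ranges can be verified against the Citus metadata catalog, e.g.:
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_range'::regclass
ORDER BY shardid;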
-- Run aggregate(distinct) on partition column for range partitioned table
@ -89,8 +99,8 @@ CREATE TABLE lineitem_hash (
SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
SELECT master_create_worker_shards('lineitem_hash', 4, 1);
\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
-- aggregate(distinct) on partition column is allowed

View File

@ -14,7 +14,7 @@ SELECT sum(l_suppkey) / 2::numeric FROM lineitem;
SELECT sum(l_suppkey)::int8 / 2 FROM lineitem;
-- Create a new table to test type conversions on different types, and stage
-- Create a new table to test type conversions on different types, and load
-- data into this table. Then, apply aggregate functions and divide / multiply
-- the results to test type conversions.
@ -24,7 +24,7 @@ CREATE TABLE aggregate_type (
interval_value interval not null);
SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append');
\STAGE aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
-- Test conversions using aggregates on floats and division

View File

@ -29,7 +29,7 @@ CREATE TABLE lineitem_alter (
l_comment varchar(44) not null
);
SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
\STAGE lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- Verify that we can add columns
@ -57,8 +57,8 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1;
ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT;
-- \stage to verify that default values take effect
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \copy to verify that default values take effect
\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
@ -71,16 +71,17 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
-- Drop default so that NULLs will be inserted for this column
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;
-- \stage should fail because it will try to insert NULLs for a NOT NULL column
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \copy should fail because it will try to insert NULLs for a NOT NULL column
-- Note: this operation will create a table on the workers, but it won't be recorded in the metadata
\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- Verify that DROP NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
\d lineitem_alter
-- \stage should succeed now
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \copy should succeed now
\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
SELECT count(*) from lineitem_alter;
-- Verify that SET DATA TYPE works
@ -258,7 +259,8 @@ DROP TABLESPACE super_fast_ssd;
SET citus.enable_ddl_propagation to true;
SELECT master_apply_delete_command('DELETE FROM lineitem_alter');
DROP TABLE lineitem_alter;
-- check that nothing's left over on workers
-- check that nothing's left over on workers, other than the leftover shard created
-- during the unsuccessful COPY
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
\c - - - :master_port

View File

@ -32,12 +32,12 @@ SELECT master_create_worker_shards('multi_append_table_to_shard_right_hash', 1,
-- Replicate 'left' table on both workers
SELECT set_config('citus.shard_replication_factor', '2', false);
\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
-- Place 'right' table only on the primary worker
SELECT set_config('citus.shard_replication_factor', '1', false);
\STAGE multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'
\copy multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'
-- Reset shard replication factor to ensure tasks will be assigned to both workers
SELECT set_config('citus.shard_replication_factor', '2', false);
@ -111,7 +111,7 @@ SELECT master_create_empty_shard('multi_append_table_to_shard_date');
SELECT * FROM multi_append_table_to_shard_date;
-- Stage an empty table and check that we can query the distributed table
-- Create an empty distributed table and check that we can query it
CREATE TABLE multi_append_table_to_shard_stage (LIKE multi_append_table_to_shard_date);
SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM
@ -120,7 +120,7 @@ WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid;
SELECT * FROM multi_append_table_to_shard_date;
-- Stage NULL values and check that we can query the table
-- INSERT NULL values and check that we can query the table
INSERT INTO multi_append_table_to_shard_stage VALUES (NULL, NULL);
SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM
@ -129,7 +129,7 @@ WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid;
SELECT * FROM multi_append_table_to_shard_date;
-- Stage regular values and check that we can query the table
-- INSERT regular values and check that we can query the table
INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-01-01', 3);
SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM

View File

@ -29,8 +29,8 @@ CREATE TABLE lineitem_hash (
SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
SELECT master_create_worker_shards('lineitem_hash', 8, 1);
\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
SET citus.task_executor_type to "task-tracker";

View File

@ -105,7 +105,7 @@ COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.2.data' WITH (DELIMITER
SELECT count(*) FROM customer_copy_hash;
-- Test client-side copy from file
\COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|');
\copy customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|');
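-- Editor's note: psql's \copy reads the file on the client and streams it to
-- the server as COPY ... FROM STDIN, so unlike server-side COPY the data file
-- only needs to be readable by the psql process running the test.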
-- Confirm that data was copied
SELECT count(*) FROM customer_copy_hash;

View File

@ -11,6 +11,6 @@ CREATE TABLE nation (
n_comment varchar(152));
SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append');
\STAGE tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
SELECT count(*) from tpch.nation;

View File

@ -2,7 +2,7 @@
-- MULTI_LARGE_SHARDID
--
-- Stage data to distributed tables, and run TPC-H query #1 and #6. This test
-- Load data into distributed tables, and run TPC-H query #1 and #6. This test
-- differs from previous tests in that it modifies the *internal* shardId
-- generator, forcing the distributed database to use 64-bit shard identifiers.
@ -11,10 +11,10 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000;
-- Stage additional data to start using large shard identifiers.
-- Load additional data to start using large shard identifiers.
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
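-- Editor's sketch (illustrative only, not part of the test): shards created
-- after the sequence restart carry 64-bit identifiers and can be listed with:
SELECT shardid FROM pg_dist_shard WHERE shardid >= 100200300400500 ORDER BY shardid;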
-- Query #1 from the TPC-H decision support benchmark.

View File

@ -0,0 +1,21 @@
--
-- MULTI_STAGE_DATA
--
-- Tests for loading data in a distributed cluster. Please note that the number
-- of shards created depends on two config values: citus.shard_replication_factor and
-- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement
-- policy is left to the default value (round-robin) to test the common install case.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000;
\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
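-- Editor's sketch (illustrative only, not part of the test): the per-table
-- shard counts, which the comment above says depend on
-- citus.shard_replication_factor and citus.shard_max_size, can be inspected
-- from the metadata, e.g.:
SELECT logicalrelid::regclass AS table_name, count(*) AS shard_count
FROM pg_dist_shard
GROUP BY logicalrelid
ORDER BY 1;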

View File

@ -1,7 +1,7 @@
--
-- MULTI_STAGE_LARGE_RECORDS
--
-- Tests for staging data with large records (i.e. greater than the read buffer
-- Tests for loading data with large records (i.e. greater than the read buffer
-- size, which is 32kB) in a distributed cluster. These tests make sure that we
-- are creating shards of correct size even when records are large.
@ -15,7 +15,7 @@ SET citus.shard_max_size TO "256kB";
CREATE TABLE large_records_table (data_id integer, data text);
SELECT master_create_distributed_table('large_records_table', 'data_id', 'append');
\STAGE large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class
WHERE pg_class.oid=logicalrelid AND relname='large_records_table'

View File

@ -0,0 +1,16 @@
--
-- MULTI_STAGE_MORE_DATA
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
-- We load more data into the customer and part tables to test distributed joins. The
-- loading causes the planner to consider customer and part tables as large, and
-- evaluate plans where some of the underlying tables need to be repartitioned.
\copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
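-- Editor's note: "large" here is relative to citus.large_table_shard_count
-- (set to 2 elsewhere in this commit); tables whose shard count reaches that
-- threshold become candidates for repartition joins.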

View File

@ -7,7 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 320000;
-- Create a new range partitioned customer_delete_protocol table and stage data into it.
-- Create a new range partitioned customer_delete_protocol table and load data into it.
CREATE TABLE customer_delete_protocol (
c_custkey integer not null,
c_name varchar(25) not null,
@ -19,9 +19,9 @@ CREATE TABLE customer_delete_protocol (
c_comment varchar(117) not null);
SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append');
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
-- Testing master_apply_delete_command
-- Check that we don't support conditions on columns other than partition key.

View File

@ -53,11 +53,11 @@ FROM
multi_outer_join_left a LEFT JOIN multi_outer_join_third b ON (l_custkey = t_custkey);
-- Left table is a large table
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
-- Right table is a small table
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
-- Make sure we do not crash if one table has no shards
SELECT
@ -71,7 +71,7 @@ FROM
multi_outer_join_third a LEFT JOIN multi_outer_join_right b ON (r_custkey = t_custkey);
-- Third table is a single shard table with all data
\STAGE multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
\copy multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
-- Regular outer join should return results for all rows
SELECT
@ -150,7 +150,7 @@ FROM
-- Turn the right table into a large table
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
-- Shards do not have 1-1 matching. We should error here.
@ -164,11 +164,11 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left');
SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right');
-- reload shards with 1-1 matching
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
-- multi_outer_join_third is a single shard table
@ -409,7 +409,7 @@ ORDER BY cnt DESC, l1.l_custkey DESC
LIMIT 20;
-- Add a shard to the left table that overlaps with multiple shards in the right
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
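-- Editor's note: customer.1.data spans a wide custkey range, so this shard
-- overlaps several shards of the right table; the 1-to-1 shard partitioning
-- that distributed outer joins require (per the error in the expected output)
-- no longer holds, hence the errors below.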
-- All outer joins should error out

View File

@ -1,34 +0,0 @@
--
-- MULTI_STAGE_DATA
--
-- Tests for staging data in a distributed cluster. Please note that the number
-- of shards uploaded depends on two config values: citus.shard_replication_factor and
-- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement
-- policy is left to the default value (round-robin) to test the common install case.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000;
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\STAGE orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\STAGE orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\STAGE customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\STAGE nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\STAGE part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\STAGE supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
-- check that we error out if we try to stage into a hash partitioned table
CREATE TABLE nation_hash_partitioned (
n_nationkey integer not null,
n_name char(25) not null,
n_regionkey integer not null,
n_comment varchar(152));
SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash');
\STAGE nation_hash_partitioned FROM '@abs_srcdir@/data/nation.data' with delimiter '|'

View File

@ -1,16 +0,0 @@
--
-- MULTI_STAGE_MORE_DATA
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
-- We stage more data to customer and part tables to test distributed joins. The
-- staging causes the planner to consider customer and part tables as large, and
-- evaluate plans where some of the underlying tables need to be repartitioned.
\STAGE customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\STAGE customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\STAGE part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'

View File

@ -77,15 +77,35 @@ FROM
GROUP BY
l_orderkey) AS unit_prices;
-- Stage data to tables.
-- Load data into tables.
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "1MB";
\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
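-- Editor's note: the shard ranges assigned above are deliberately not
-- identical (lineitem_subquery's second shard ends at 14947, while
-- orders_subquery's ends at 14946), which is what the following check
-- exercises.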
-- Check that we error out if shard min/max values are not exactly same.
@ -280,10 +300,13 @@ SELECT max(l_orderkey) FROM
) z
) y;
-- Load more data to one relation, then test if we error out because of different
-- Add one more shard to one relation, then test if we error out because of different
-- shard counts for joining relations.
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 15000, shardmaxvalue = 20000
WHERE shardid = :new_shard_id;
SELECT
avg(unit_price)

View File

@ -18,7 +18,7 @@ test: multi_table_ddl
# uploading data to it.
# ----------
test: multi_create_table
test: multi_stage_data
test: multi_load_data
test: multi_basic_queries multi_complex_expressions multi_verify_no_subquery
test: multi_single_relation_subquery

View File

@ -20,6 +20,7 @@ test: multi_tpch_query1 multi_tpch_query3 multi_tpch_query6 multi_tpch_query10
test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19
# ----------
# multi_fdw_large_shardid stages more shards into lineitem, and must come last
# multi_fdw_large_shardid loads more lineitem data using high shard identifiers, and must
# come last
# ----------
test: multi_fdw_large_shardid

View File

@ -24,7 +24,7 @@ test: multi_table_ddl
# ----------
test: multi_create_table
test: multi_master_protocol
test: multi_stage_data
test: multi_load_data
# ----------
# Miscellaneous tests to check our query planning behavior
@ -55,26 +55,26 @@ test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19
test: multi_tpch_query7 multi_tpch_query7_nested
# ----------
# Parallel tests to check our join order planning logic. Note that we stage data
# Parallel tests to check our join order planning logic. Note that we load data
# below; these tests should therefore come after the execution tests.
# ----------
test: multi_join_order_tpch_small multi_join_order_additional
test: multi_stage_more_data
test: multi_load_more_data
test: multi_join_order_tpch_large
# ----------
# Tests for large-table join planning and execution.
# Be careful when staging new data before these tests, as they
# expect specific shard identifiers in the output.
# Tests for large-table join planning and execution. Be careful when creating
# new shards before these tests, as they expect specific shard identifiers in
# the output.
# ----------
test: multi_large_table_join_planning
test: multi_large_table_pruning
test: multi_large_table_task_assignment
# ----------
# Tests to check our large record staging and shard deletion behavior
# Tests to check our large record loading and shard deletion behavior
# ----------
test: multi_stage_large_records
test: multi_load_large_records
test: multi_master_delete_protocol
test: multi_shard_modify
@ -85,7 +85,7 @@ test: multi_index_statements
test: multi_alter_table_statements
# ----------
# multi_create_schema tests creation, staging and querying of a table in a new
# multi_create_schema tests creation, loading, and querying of a table in a new
# schema (namespace).
# ----------
test: multi_create_schema
@ -97,13 +97,13 @@ test: multi_create_schema
test: multi_utility_warnings
# ---------
# multi_append_table_to_shard stages shards in a way that forces
# multi_append_table_to_shard loads data to create shards in a way that forces
# shard caching.
# ---------
test: multi_append_table_to_shard
# ---------
# multi_outer_join stages shards to create different mappings for outer joins
# multi_outer_join loads data to create shards to test outer join mappings
# ---------
test: multi_outer_join
@ -141,7 +141,7 @@ test: multi_copy
test: multi_router_planner
# ----------
# multi_large_shardid stages more shards into lineitem
# multi_large_shardid loads more lineitem data using high shard identifiers
# ----------
test: multi_large_shardid

View File

@ -22,7 +22,7 @@ test: multi_table_ddl
# ----------
test: multi_create_table
test: multi_master_protocol
test: multi_stage_data
test: multi_load_data
# ----------
# Miscellaneous tests to check our query planning behavior
@ -48,28 +48,28 @@ test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19
test: multi_tpch_query7 multi_tpch_query7_nested
# ----------
# Parallel tests to check our join order planning logic. Note that we stage data
# Parallel tests to check our join order planning logic. Note that we load data
# below; these tests should therefore come after the execution tests.
# ----------
test: multi_join_order_tpch_small multi_join_order_additional
test: multi_stage_more_data
test: multi_load_more_data
test: multi_join_order_tpch_large
# ----------
# Tests to check our large record staging and shard deletion behavior
# Tests to check our large record loading and shard deletion behavior
# ----------
test: multi_stage_large_records
test: multi_load_large_records
test: multi_master_delete_protocol
test: multi_shard_modify
# ----------
# multi_create_schema tests creation, staging and querying of a table in a new
# multi_create_schema tests creation, loading, and querying of a table in a new
# schema (namespace).
# ----------
test: multi_create_schema
# ---------
# multi_outer_join stages shards to create different mappings for outer joins
# multi_outer_join loads data to create shards to test outer join mappings
# ---------
test: multi_outer_join
@ -99,7 +99,7 @@ test: multi_data_types
test: multi_copy
# ----------
# multi_large_shardid stages more shards into lineitem
# multi_large_shardid loads more lineitem data using high shard identifiers
# ----------
test: multi_large_shardid

View File

@ -3,7 +3,7 @@
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000;
-- Create a new range partitioned lineitem table and stage data into it
-- Create a new range partitioned lineitem table and load data into it
CREATE TABLE lineitem_range (
l_orderkey bigint not null,
l_partkey integer not null,
@ -27,9 +27,17 @@ SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range');
(1 row)
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "500MB";
\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
RESET citus.shard_max_size;
-- Run aggregate(distinct) on partition column for range partitioned table
SELECT count(distinct l_orderkey) FROM lineitem_range;
@ -131,8 +139,8 @@ SELECT master_create_worker_shards('lineitem_hash', 4, 1);
(1 row)
\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
-- aggregate(distinct) on partition column is allowed
SELECT count(distinct l_orderkey) FROM lineitem_hash;
count

View File

@ -28,7 +28,7 @@ SELECT sum(l_suppkey)::int8 / 2 FROM lineitem;
30308988
(1 row)
-- Create a new table to test type conversions on different types, and stage
-- Create a new table to test type conversions on different types, and load
-- data into this table. Then, apply aggregate functions and divide / multiply
-- the results to test type conversions.
CREATE TABLE aggregate_type (
@ -41,7 +41,7 @@ SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append'
(1 row)
\STAGE aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
-- Test conversions using aggregates on floats and division
SELECT min(float_value), max(float_value),
sum(float_value), count(float_value), avg(float_value)

View File

@ -30,7 +30,7 @@ SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append')
(1 row)
\STAGE lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- Verify that we can add columns
ALTER TABLE lineitem_alter ADD COLUMN float_column FLOAT;
NOTICE: using one-phase commit for distributed DDL commands
@ -121,8 +121,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1;
NOTICE: using one-phase commit for distributed DDL commands
HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT;
-- \stage to verify that default values take effect
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \copy to verify that default values take effect
\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
float_column | count
--------------+-------
@ -167,15 +167,11 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
-- Drop default so that NULLs will be inserted for this column
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;
-- \stage should fail because it will try to insert NULLs for a NOT NULL column
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \copy should fail because it will try to insert NULLs for a NOT NULL column
-- Note: this operation will create a table on the workers, but it won't be recorded in the metadata
\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
ERROR: null value in column "int_column2" violates not-null constraint
DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..."
ERROR: null value in column "int_column2" violates not-null constraint
DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..."
\stage: failed to replicate shard to enough replicas
DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
-- Verify that DROP NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
\d lineitem_alter
@ -204,8 +200,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
int_column2 | integer |
null_column | integer |
-- \stage should succeed now
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- \copy should succeed now
\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
SELECT count(*) from lineitem_alter;
count
-------
@ -474,7 +470,7 @@ SELECT master_create_worker_shards('test_ab', 8, 2);
INSERT INTO test_ab VALUES (2, 10);
INSERT INTO test_ab VALUES (2, 11);
CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a);
WARNING: could not create unique index "temp_unique_index_1_220016"
WARNING: could not create unique index "temp_unique_index_1_220021"
DETAIL: Key (a)=(2) is duplicated.
CONTEXT: while executing command on localhost:57638
ERROR: could not execute DDL command on worker node shards
@ -605,15 +601,17 @@ SET citus.enable_ddl_propagation to true;
SELECT master_apply_delete_command('DELETE FROM lineitem_alter');
master_apply_delete_command
-----------------------------
9
14
(1 row)
DROP TABLE lineitem_alter;
-- check that nothing's left over on workers
-- check that nothing's left over on workers, other than the leftover shard created
-- during the unsuccessful COPY
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
relname
---------
(0 rows)
relname
-----------------------
lineitem_alter_220009
(1 row)
\c - - - :master_port

View File

@ -50,8 +50,8 @@ SELECT set_config('citus.shard_replication_factor', '2', false);
2
(1 row)
\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
-- Place 'right' table only on the primary worker
SELECT set_config('citus.shard_replication_factor', '1', false);
set_config
@ -59,7 +59,7 @@ SELECT set_config('citus.shard_replication_factor', '1', false);
1
(1 row)
\STAGE multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'
\copy multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'
-- Reset shard replication factor to ensure tasks will be assigned to both workers
SELECT set_config('citus.shard_replication_factor', '2', false);
set_config
@ -175,7 +175,7 @@ SELECT * FROM multi_append_table_to_shard_date;
------------+-------
(0 rows)
-- Stage an empty table and check that we can query the distributed table
-- Create an empty distributed table and check that we can query it
CREATE TABLE multi_append_table_to_shard_stage (LIKE multi_append_table_to_shard_date);
SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM
@ -191,7 +191,7 @@ SELECT * FROM multi_append_table_to_shard_date;
------------+-------
(0 rows)
-- Stage NULL values and check that we can query the table
-- INSERT NULL values and check that we can query the table
INSERT INTO multi_append_table_to_shard_stage VALUES (NULL, NULL);
SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM
@ -208,7 +208,7 @@ SELECT * FROM multi_append_table_to_shard_date;
|
(1 row)
-- Stage regular values and check that we can query the table
-- INSERT regular values and check that we can query the table
INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-01-01', 3);
SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM

View File

@ -34,8 +34,8 @@ SELECT master_create_worker_shards('lineitem_hash', 8, 1);
(1 row)
\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
SET citus.task_executor_type to "task-tracker";
-- count(distinct) is supported on top level query if there
-- is a grouping on the partition key

View File

@ -118,7 +118,7 @@ SELECT count(*) FROM customer_copy_hash;
(1 row)
-- Test client-side copy from file
\COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|');
\copy customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|');
-- Confirm that data was copied
SELECT count(*) FROM customer_copy_hash;
count

View File

@ -12,7 +12,7 @@ SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append');
(1 row)
\STAGE tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
SELECT count(*) from tpch.nation;
count
-------

View File

@ -1,14 +1,14 @@
--
-- MULTI_LARGE_SHARDID
--
-- Stage data to distributed tables, and run TPC-H query #1 and #6. This test
-- Load data into distributed tables, and run TPC-H query #1 and #6. This test
-- differs from previous tests in that it modifies the *internal* shardId
-- generator, forcing the distributed database to use 64-bit shard identifiers.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000;
-- Stage additional data to start using large shard identifiers.
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
-- Load additional data to start using large shard identifiers.
\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
-- Query #1 from the TPC-H decision support benchmark.
SELECT
l_returnflag,

View File

@ -0,0 +1,17 @@
--
-- MULTI_STAGE_DATA
--
-- Tests for loading data in a distributed cluster. Please note that the number
-- of shards created depends on two config values: citus.shard_replication_factor and
-- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement
-- policy is left to the default value (round-robin) to test the common install case.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000;
\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'

View File

@ -1,7 +1,7 @@
--
-- MULTI_STAGE_LARGE_RECORDS
--
-- Tests for staging data with large records (i.e. greater than the read buffer
-- Tests for loading data with large records (i.e. greater than the read buffer
-- size, which is 32kB) in a distributed cluster. These tests make sure that we
-- are creating shards of correct size even when records are large.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 300000;
@ -14,7 +14,7 @@ SELECT master_create_distributed_table('large_records_table', 'data_id', 'append
(1 row)
\STAGE large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class
WHERE pg_class.oid=logicalrelid AND relname='large_records_table'
ORDER BY shardid;

View File

@ -0,0 +1,11 @@
--
-- MULTI_STAGE_MORE_DATA
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
-- We load more data into the customer and part tables to test distributed joins. The
-- loading causes the planner to consider customer and part tables as large, and
-- evaluate plans where some of the underlying tables need to be repartitioned.
\copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'

View File

@ -3,7 +3,7 @@
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 320000;
-- Create a new range partitioned customer_delete_protocol table and stage data into it.
-- Create a new range partitioned customer_delete_protocol table and load data into it.
CREATE TABLE customer_delete_protocol (
c_custkey integer not null,
c_name varchar(25) not null,
@ -19,9 +19,9 @@ SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey',
(1 row)
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
-- Testing master_apply_delete_command
-- Check that we don't support conditions on columns other than partition key.
SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol

View File

@ -62,10 +62,10 @@ FROM
ERROR: cannot perform distributed planning on this query
DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
-- Left table is a large table
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
-- Right table is a small table
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
-- Make sure we do not crash if one table has no shards
SELECT
min(l_custkey), max(l_custkey)
@ -84,7 +84,7 @@ LOG: join order: [ "multi_outer_join_third" ][ broadcast join "multi_outer_join
(1 row)
-- Third table is a single shard table with all data
\STAGE multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
\copy multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|'
-- Regular outer join should return results for all rows
SELECT
min(l_custkey), max(l_custkey)
@ -202,7 +202,7 @@ LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_
(1 row)
-- Turn the right table into a large table
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
-- Shards do not have 1-1 matching. We should error here.
SELECT
min(l_custkey), max(l_custkey)
@ -224,10 +224,10 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right');
(1 row)
-- reload shards with 1-1 matching
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|'
\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|'
-- multi_outer_join_third is a single shard table
-- Regular left join should work as expected
SELECT
@ -754,7 +754,7 @@ LIMIT 20;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries in outer joins are not supported
-- Add a shard to the left table that overlaps with multiple shards in the right
\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
-- All outer joins should error out
SELECT
min(l_custkey), max(l_custkey)

View File

@ -1,31 +0,0 @@
--
-- MULTI_STAGE_DATA
--
-- Tests for staging data in a distributed cluster. Please note that the number
-- of shards uploaded depends on two config values: citus.shard_replication_factor and
-- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement
-- policy is left to the default value (round-robin) to test the common install case.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000;
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\STAGE orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\STAGE orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\STAGE customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\STAGE nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\STAGE part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\STAGE supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
-- check that we error out if we try to stage into a hash partitioned table
CREATE TABLE nation_hash_partitioned (
n_nationkey integer not null,
n_name char(25) not null,
n_regionkey integer not null,
n_comment varchar(152));
SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
\STAGE nation_hash_partitioned FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\stage: staging data into hash partitioned tables is not supported

View File

@ -1,11 +0,0 @@
--
-- MULTI_STAGE_MORE_DATA
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
-- We stage more data to customer and part tables to test distributed joins. The
-- staging causes the planner to consider customer and part tables as large, and
-- evaluate plans where some of the underlying tables need to be repartitioned.
\STAGE customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\STAGE customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\STAGE part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'

View File

@ -82,12 +82,28 @@ FROM
(1 row)
-- Stage data to tables.
-- Load data into tables.
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "1MB";
\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
-- Check that we error out if shard min/max values are not exactly same.
SELECT
avg(unit_price)
@@ -310,9 +326,12 @@ SELECT max(l_orderkey) FROM
14947
(1 row)
-- Load more data to one relation, then test if we error out because of different
-- Add one more shard to one relation, then test if we error out because of different
-- shard counts for joining relations.
\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 15000, shardmaxvalue = 20000
WHERE shardid = :new_shard_id;
SELECT
avg(unit_price)
FROM
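Unlike \stage, which derived shard min/max values from the input files, the rewritten test pins shard ranges explicitly before loading, because the assertions here depend on exact boundaries. The pattern, condensed (range values repeat the first hunk above):

SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
-- \gset stores the returned shard id in a psql variable, which the UPDATE
-- then references as :new_shard_id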

View File

@@ -9,9 +9,9 @@
/multi_large_shardid.sql
/multi_master_delete_protocol.sql
/multi_outer_join.sql
/multi_stage_data.sql
/multi_stage_large_records.sql
/multi_stage_more_data.sql
/multi_load_data.sql
/multi_load_large_records.sql
/multi_load_more_data.sql
/multi_subquery.sql
/worker_copy.sql
/multi_complex_count_distinct.sql

View File

@@ -65,7 +65,7 @@ CREATE TABLE test_count_distinct_schema.nation_hash(
SELECT master_create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, 2);
\COPY test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|';
\copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special

View File

@@ -10,7 +10,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
-- Create a table partitioned on integer column and update partition type to
-- hash. Then stage data to this table and update shard min max values with
-- hash. Then load data into this table and update shard min max values with
-- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
-- 1134484726, -28094569 and -1011077333.
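The four constants match PostgreSQL's built-in int4 hash function, so they can be reproduced directly (expected output taken from the comment above):

SELECT hashint4(1), hashint4(2), hashint4(3), hashint4(4);
--  -1905060026 | 1134484726 | -28094569 | -1011077333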

View File

@@ -15,7 +15,7 @@ SET client_min_messages TO LOG;
-- Change configuration to treat lineitem, orders, customer, and part tables as
-- large. The following queries are basically the same as the ones in tpch_small
-- except that more data has been staged to customer and part tables. Therefore,
-- except that more data has been loaded into customer and part tables. Therefore,
-- we will apply different distributed join strategies for these queries.
SET citus.large_table_shard_count TO 2;
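To observe which join strategy that setting yields, one could EXPLAIN a representative query; a sketch, with the caveat that repartition joins also require the task-tracker executor and that the plan shape depends on shard layout:

SET citus.task_executor_type TO 'task-tracker';
SET citus.large_table_shard_count TO 2;
EXPLAIN SELECT count(*) FROM customer, orders WHERE c_custkey = o_custkey;
-- with both inputs at or over the threshold, expect repartition tasks rather
-- than a broadcast of the smaller table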

View File

@@ -18,7 +18,7 @@ CREATE TABLE public.nation_local(
n_comment varchar(152)
);
\COPY public.nation_local FROM STDIN with delimiter '|';
\copy public.nation_local FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -85,7 +85,7 @@ CREATE TABLE nation_append_search_path(
);
SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append');
\COPY nation_append_search_path FROM STDIN with delimiter '|';
\copy nation_append_search_path FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -146,7 +146,7 @@ SELECT * FROM nation_hash WHERE n_nationkey = 7;
-- test UDFs with schemas
SET search_path TO public;
\COPY test_schema_support.nation_hash FROM STDIN with delimiter '|';
\copy test_schema_support.nation_hash FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -337,7 +337,7 @@ CREATE TABLE test_schema_support.nation_hash_collation(
SELECT master_create_distributed_table('test_schema_support.nation_hash_collation', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('test_schema_support.nation_hash_collation', 4, 2);
\COPY test_schema_support.nation_hash_collation FROM STDIN with delimiter '|';
\copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -360,7 +360,7 @@ CREATE TABLE nation_hash_collation_search_path(
SELECT master_create_distributed_table('nation_hash_collation_search_path', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('nation_hash_collation_search_path', 4, 2);
\COPY nation_hash_collation_search_path FROM STDIN with delimiter '|';
\copy nation_hash_collation_search_path FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -396,7 +396,7 @@ SELECT master_create_distributed_table('test_schema_support.nation_hash_composit
SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_types', 4, 2);
-- insert some data to verify composite type queries
\COPY test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|';
\copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai|(a,a)
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon|(a,b)
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special |(a,c)
@@ -531,7 +531,7 @@ SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_appen
-- test with search_path is set
SET search_path TO test_schema_support;
\COPY nation_append FROM STDIN with delimiter '|';
\copy nation_append FROM STDIN with delimiter '|';
0|ALGERIA|0| haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -578,7 +578,7 @@ CREATE TABLE test_schema_support_join_2.nation_hash (
SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4, 1);
\COPY test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|';
\copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -590,7 +590,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4,
SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4, 1);
\COPY test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|';
\copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -602,7 +602,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4
SELECT master_create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('test_schema_support_join_2.nation_hash', 4, 1);
\COPY test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|';
\copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
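Taken together, the schema-support hunks exercise both ways a load can address a distributed table; in sketch form (file input shown here in place of the STDIN feed the tests use):

\copy test_schema_support.nation_hash FROM 'nation.data' with delimiter '|'
SET search_path TO test_schema_support;
\copy nation_hash FROM 'nation.data' with delimiter '|'
-- both target the same distributed table; the second resolves it via search_path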

View File

@@ -7,7 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000;
-- Create a new hash partitioned multi_shard_modify_test table and stage data into it.
-- Create a new hash partitioned multi_shard_modify_test table and load data into it.
CREATE TABLE multi_shard_modify_test (
t_key integer not null,
t_name varchar(25) not null,