From b513f1c9114f1be69caeb784ca157215bf100632 Mon Sep 17 00:00:00 2001 From: Eren Basak Date: Mon, 27 Jun 2016 07:23:08 +0300 Subject: [PATCH 1/5] Replace \stage With \copy on Regression Tests Fixes #547 This change removes all references to \stage in the regression tests and puts \COPY instead. Doing so changed shard counts, min/max values on some test tables (lineitem, orders, etc.). --- src/test/regress/expected/multi_array_agg.out | 22 +-- .../expected/multi_complex_expressions.out | 2 +- src/test/regress/expected/multi_explain.out | 57 +++---- .../expected/multi_join_order_additional.out | 72 +++++---- .../regress/expected/multi_join_pruning.out | 34 +++-- .../multi_large_table_join_planning.out | 68 +++++---- .../expected/multi_large_table_pruning.out | 16 +- .../multi_large_table_task_assignment.out | 142 ++++++++++++------ .../multi_limit_clause_approximate.out | 12 +- .../multi_null_minmax_value_pruning.out | 68 +++++---- .../expected/multi_partition_pruning.out | 9 +- .../regress/input/multi_agg_distinct.source | 14 +- .../input/multi_agg_type_conversion.source | 2 +- .../input/multi_alter_table_statements.source | 18 ++- .../input/multi_append_table_to_shard.source | 6 +- .../regress/input/multi_create_schema.source | 2 +- .../regress/input/multi_large_shardid.source | 4 +- .../input/multi_master_delete_protocol.source | 6 +- .../regress/input/multi_outer_join.source | 20 +-- .../regress/input/multi_stage_data.source | 29 +--- .../input/multi_stage_large_records.source | 2 +- .../input/multi_stage_more_data.source | 6 +- src/test/regress/input/multi_subquery.source | 35 ++++- .../regress/output/multi_agg_distinct.source | 12 +- .../output/multi_agg_type_conversion.source | 2 +- .../multi_alter_table_statements.source | 36 +++-- .../output/multi_append_table_to_shard.source | 6 +- .../regress/output/multi_create_schema.source | 2 +- .../regress/output/multi_large_shardid.source | 4 +- .../multi_master_delete_protocol.source | 6 +- .../regress/output/multi_outer_join.source | 20 +-- .../regress/output/multi_stage_data.source | 30 +--- .../output/multi_stage_large_records.source | 2 +- .../output/multi_stage_more_data.source | 6 +- src/test/regress/output/multi_subquery.source | 31 +++- 35 files changed, 478 insertions(+), 325 deletions(-) diff --git a/src/test/regress/expected/multi_array_agg.out b/src/test/regress/expected/multi_array_agg.out index b473b7967..b3450b767 100644 --- a/src/test/regress/expected/multi_array_agg.out +++ b/src/test/regress/expected/multi_array_agg.out @@ -98,10 +98,10 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_agg(l_orderkey) FROM li GROUP BY l_quantity ORDER BY l_quantity; l_quantity | count | avg | array_agg ------------+-------+-----------------------+-------------------------------------------------------------------------------------------------- - 1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476} - 2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476} - 3.00 | 14 | 4714.0392857142857143 | {5509,5543,5605,5606,5827,9124,9157,9184,9223,9254,9349,9414,9475,9477} - 4.00 | 19 | 5929.7136842105263158 | {5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985,9091,9120,9281,9347,9382,9440,9473} + 1.00 | 17 | 1477.1258823529411765 | {8997,9026,9158,9184,9220,9222,9348,9383,9476,5543,5633,5634,5698,5766,5856,5857,5986} + 2.00 | 19 | 3078.4242105263157895 | 
{9030,9058,9123,9124,9188,9344,9441,9476,5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923} + 3.00 | 14 | 4714.0392857142857143 | {9124,9157,9184,9223,9254,9349,9414,9475,9477,5509,5543,5605,5606,5827} + 4.00 | 19 | 5929.7136842105263158 | {9091,9120,9281,9347,9382,9440,9473,5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985} (4 rows) SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month @@ -109,10 +109,10 @@ SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | my_month ------------+------------------------------------------------ - 1.00 | {9,5,7,5,9,11,11,4,7,7,4,7,4,2,6,3,5} - 2.00 | {11,10,8,5,5,12,3,11,7,11,5,7,6,6,10,1,12,6,5} - 3.00 | {4,9,8,11,7,10,6,7,8,5,8,9,11,3} - 4.00 | {1,5,6,11,12,10,9,6,1,2,5,1,11,6,2,8,2,6,10} + 1.00 | {7,7,4,7,4,2,6,3,5,9,5,7,5,9,11,11,4} + 2.00 | {7,6,6,10,1,12,6,5,11,10,8,5,5,12,3,11,7,11,5} + 3.00 | {10,6,7,8,5,8,9,11,3,4,9,8,11,7} + 4.00 | {11,6,2,8,2,6,10,1,5,6,11,12,10,9,6,1,2,5,1} (4 rows) SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity < 5 @@ -120,10 +120,10 @@ SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | array_agg ------------+--------------------------------------------- - 1.00 | {11269,11397,11713,11715,11973,18317,18445} - 2.00 | {11847,18061,18247,18953} + 1.00 | {18317,18445,11269,11397,11713,11715,11973} + 2.00 | {18061,18247,18953,11847} 3.00 | {18249,18315,18699,18951,18955} - 4.00 | {11653,11659,18241,18765} + 4.00 | {18241,18765,11653,11659} (4 rows) -- Check that we can execute array_agg() with an expression containing NULL values diff --git a/src/test/regress/expected/multi_complex_expressions.out b/src/test/regress/expected/multi_complex_expressions.out index fd6bea47a..f74254f8f 100644 --- a/src/test/regress/expected/multi_complex_expressions.out +++ b/src/test/regress/expected/multi_complex_expressions.out @@ -390,7 +390,7 @@ ORDER BY customer_keys.o_custkey DESC LIMIT 10 OFFSET 20; DEBUG: push down of limit count: 30 -DEBUG: building index "pg_toast_16992_index" on table "pg_toast_16992" +DEBUG: building index "pg_toast_17021_index" on table "pg_toast_17021" o_custkey | total_order_count -----------+------------------- 1466 | 1 diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 8cd5a1802..dde4e6ad9 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -34,8 +34,8 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT) GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Distributed Query into pg_merge_job_570000 Executor: Real-Time - Task Count: 6 - Tasks Shown: One of 6 + Task Count: 8 + Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate @@ -55,8 +55,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) { "Executor": "Real-Time", "Job": { - "Task Count": 6, - "Tasks Shown": "One of 6", + "Task Count": 8, + "Tasks Shown": "One of 8", "Tasks": [ { "Node": "host=localhost port=57637 dbname=regression", @@ -122,8 +122,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Real-Time - 6 - One of 6 + 8 + One of 8 host=localhost port=57637 dbname=regression @@ -193,8 +193,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - Executor: "Real-Time" Job: - Task Count: 
6 - Tasks Shown: "One of 6" + Task Count: 8 + Tasks Shown: "One of 8" Tasks: - Node: "host=localhost port=57637 dbname=regression" Remote Plan: @@ -232,8 +232,8 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT) GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Distributed Query into pg_merge_job_570006 Executor: Real-Time - Task Count: 6 - Tasks Shown: One of 6 + Task Count: 8 + Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate @@ -250,8 +250,8 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; Distributed Query into pg_merge_job_570007 Executor: Real-Time - Task Count: 6 - Tasks Shown: One of 6 + Task Count: 8 + Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate @@ -270,8 +270,8 @@ EXPLAIN (COSTS FALSE) ORDER BY l_quantity LIMIT 10; Distributed Query into pg_merge_job_570008 Executor: Real-Time - Task Count: 6 - Tasks Shown: One of 6 + Task Count: 8 + Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Limit @@ -282,7 +282,7 @@ Distributed Query into pg_merge_job_570008 -> Seq Scan on lineitem_290000 lineitem Filter: (l_quantity < 5.0) -> Hash - -> Seq Scan on orders_290006 orders + -> Seq Scan on orders_290008 orders Master Query -> Limit -> Sort @@ -357,8 +357,8 @@ EXPLAIN (COSTS FALSE) SELECT * FROM lineitem; Distributed Query into pg_merge_job_570012 Executor: Real-Time - Task Count: 6 - Tasks Shown: One of 6 + Task Count: 8 + Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Seq Scan on lineitem_290000 lineitem @@ -370,7 +370,7 @@ EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; Distributed Query into pg_merge_job_570013 Executor: Real-Time - Task Count: 3 + Task Count: 4 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression @@ -380,12 +380,17 @@ Distributed Query into pg_merge_job_570013 -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate - -> Seq Scan on lineitem_290003 lineitem + -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate - -> Seq Scan on lineitem_290005 lineitem + -> Seq Scan on lineitem_290006 lineitem + Filter: (l_orderkey > 9030) + -> Task + Node: host=localhost port=57638 dbname=regression + -> Aggregate + -> Seq Scan on lineitem_290007 lineitem Filter: (l_orderkey > 9030) Master Query -> Aggregate @@ -403,8 +408,8 @@ EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; Distributed Query into pg_merge_job_570016 Executor: Task-Tracker - Task Count: 3 - Tasks Shown: One of 3 + Task Count: 4 + Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate @@ -429,7 +434,7 @@ Distributed Query into pg_merge_job_570019 Map Task Count: 1 Merge Task Count: 1 -> MapMergeJob - Map Task Count: 6 + Map Task Count: 8 Merge Task Count: 1 Master Query -> Aggregate @@ -452,7 +457,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Merge Task Count": 1, "Depended Jobs": [ { - "Map Task Count": 6, + "Map Task Count": 8, "Merge Task Count": 1 } ] @@ -502,7 +507,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) 1 - 6 + 8 1 @@ -548,7 +553,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Map Task Count: 1 Merge Task Count: 1 Depended Jobs: - - Map Task Count: 6 + - Map Task Count: 8 Merge Task Count: 1 Master Query: - Plan: diff --git a/src/test/regress/expected/multi_join_order_additional.out 
b/src/test/regress/expected/multi_join_order_additional.out index 51b357dee..7254b56ba 100644 --- a/src/test/regress/expected/multi_join_order_additional.out +++ b/src/test/regress/expected/multi_join_order_additional.out @@ -99,34 +99,50 @@ SELECT master_create_worker_shards('customer_hash', 2, 1); EXPLAIN SELECT l1.l_quantity FROM lineitem l1, lineitem l2 WHERE l1.l_orderkey = l2.l_orderkey AND l1.l_quantity > 5; LOG: join order: [ "lineitem" ][ local partition join "lineitem" ] -DEBUG: join prunable for intervals [1,2496] and [2497,4964] -DEBUG: join prunable for intervals [1,2496] and [4965,5986] -DEBUG: join prunable for intervals [1,2496] and [8997,11554] -DEBUG: join prunable for intervals [1,2496] and [11554,13920] -DEBUG: join prunable for intervals [1,2496] and [13921,14947] -DEBUG: join prunable for intervals [2497,4964] and [1,2496] -DEBUG: join prunable for intervals [2497,4964] and [4965,5986] -DEBUG: join prunable for intervals [2497,4964] and [8997,11554] -DEBUG: join prunable for intervals [2497,4964] and [11554,13920] -DEBUG: join prunable for intervals [2497,4964] and [13921,14947] -DEBUG: join prunable for intervals [4965,5986] and [1,2496] -DEBUG: join prunable for intervals [4965,5986] and [2497,4964] -DEBUG: join prunable for intervals [4965,5986] and [8997,11554] -DEBUG: join prunable for intervals [4965,5986] and [11554,13920] -DEBUG: join prunable for intervals [4965,5986] and [13921,14947] -DEBUG: join prunable for intervals [8997,11554] and [1,2496] -DEBUG: join prunable for intervals [8997,11554] and [2497,4964] -DEBUG: join prunable for intervals [8997,11554] and [4965,5986] -DEBUG: join prunable for intervals [8997,11554] and [13921,14947] -DEBUG: join prunable for intervals [11554,13920] and [1,2496] -DEBUG: join prunable for intervals [11554,13920] and [2497,4964] -DEBUG: join prunable for intervals [11554,13920] and [4965,5986] -DEBUG: join prunable for intervals [11554,13920] and [13921,14947] -DEBUG: join prunable for intervals [13921,14947] and [1,2496] -DEBUG: join prunable for intervals [13921,14947] and [2497,4964] -DEBUG: join prunable for intervals [13921,14947] and [4965,5986] -DEBUG: join prunable for intervals [13921,14947] and [8997,11554] -DEBUG: join prunable for intervals [13921,14947] and [11554,13920] +DEBUG: join prunable for intervals [1,1509] and [2951,4455] +DEBUG: join prunable for intervals [1,1509] and [4480,5986] +DEBUG: join prunable for intervals [1,1509] and [8997,10560] +DEBUG: join prunable for intervals [1,1509] and [10560,12036] +DEBUG: join prunable for intervals [1,1509] and [12036,13473] +DEBUG: join prunable for intervals [1,1509] and [13473,14947] +DEBUG: join prunable for intervals [1509,4964] and [8997,10560] +DEBUG: join prunable for intervals [1509,4964] and [10560,12036] +DEBUG: join prunable for intervals [1509,4964] and [12036,13473] +DEBUG: join prunable for intervals [1509,4964] and [13473,14947] +DEBUG: join prunable for intervals [2951,4455] and [1,1509] +DEBUG: join prunable for intervals [2951,4455] and [4480,5986] +DEBUG: join prunable for intervals [2951,4455] and [8997,10560] +DEBUG: join prunable for intervals [2951,4455] and [10560,12036] +DEBUG: join prunable for intervals [2951,4455] and [12036,13473] +DEBUG: join prunable for intervals [2951,4455] and [13473,14947] +DEBUG: join prunable for intervals [4480,5986] and [1,1509] +DEBUG: join prunable for intervals [4480,5986] and [2951,4455] +DEBUG: join prunable for intervals [4480,5986] and [8997,10560] +DEBUG: join prunable for intervals [4480,5986] 
and [10560,12036] +DEBUG: join prunable for intervals [4480,5986] and [12036,13473] +DEBUG: join prunable for intervals [4480,5986] and [13473,14947] +DEBUG: join prunable for intervals [8997,10560] and [1,1509] +DEBUG: join prunable for intervals [8997,10560] and [1509,4964] +DEBUG: join prunable for intervals [8997,10560] and [2951,4455] +DEBUG: join prunable for intervals [8997,10560] and [4480,5986] +DEBUG: join prunable for intervals [8997,10560] and [12036,13473] +DEBUG: join prunable for intervals [8997,10560] and [13473,14947] +DEBUG: join prunable for intervals [10560,12036] and [1,1509] +DEBUG: join prunable for intervals [10560,12036] and [1509,4964] +DEBUG: join prunable for intervals [10560,12036] and [2951,4455] +DEBUG: join prunable for intervals [10560,12036] and [4480,5986] +DEBUG: join prunable for intervals [10560,12036] and [13473,14947] +DEBUG: join prunable for intervals [12036,13473] and [1,1509] +DEBUG: join prunable for intervals [12036,13473] and [1509,4964] +DEBUG: join prunable for intervals [12036,13473] and [2951,4455] +DEBUG: join prunable for intervals [12036,13473] and [4480,5986] +DEBUG: join prunable for intervals [12036,13473] and [8997,10560] +DEBUG: join prunable for intervals [13473,14947] and [1,1509] +DEBUG: join prunable for intervals [13473,14947] and [1509,4964] +DEBUG: join prunable for intervals [13473,14947] and [2951,4455] +DEBUG: join prunable for intervals [13473,14947] and [4480,5986] +DEBUG: join prunable for intervals [13473,14947] and [8997,10560] +DEBUG: join prunable for intervals [13473,14947] and [10560,12036] QUERY PLAN ------------------------------------------------------------ explain statements for distributed queries are not enabled diff --git a/src/test/regress/expected/multi_join_pruning.out b/src/test/regress/expected/multi_join_pruning.out index fd77d607b..d1ca33a89 100644 --- a/src/test/regress/expected/multi_join_pruning.out +++ b/src/test/regress/expected/multi_join_pruning.out @@ -11,12 +11,14 @@ SET client_min_messages TO DEBUG2; SET citus.large_table_shard_count TO 2; SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; -DEBUG: join prunable for intervals [1,2496] and [8997,14946] -DEBUG: join prunable for intervals [2497,4964] and [8997,14946] -DEBUG: join prunable for intervals [4965,5986] and [8997,14946] -DEBUG: join prunable for intervals [8997,11554] and [1,5986] -DEBUG: join prunable for intervals [11554,13920] and [1,5986] -DEBUG: join prunable for intervals [13921,14947] and [1,5986] +DEBUG: join prunable for intervals [1,1509] and [8997,14946] +DEBUG: join prunable for intervals [1509,2951] and [8997,14946] +DEBUG: join prunable for intervals [2951,4455] and [8997,14946] +DEBUG: join prunable for intervals [4480,5986] and [8997,14946] +DEBUG: join prunable for intervals [8997,10560] and [1,5986] +DEBUG: join prunable for intervals [10560,12036] and [1,5986] +DEBUG: join prunable for intervals [12036,13473] and [1,5986] +DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -------+-------------------- 36086 | 3.0076679446574429 @@ -27,9 +29,11 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders DEBUG: predicate pruning for shardId 290000 DEBUG: predicate pruning for shardId 290001 DEBUG: predicate pruning for shardId 290002 -DEBUG: join prunable for intervals [8997,11554] and [1,5986] -DEBUG: join prunable for intervals [11554,13920] and [1,5986] -DEBUG: join prunable for intervals [13921,14947] and [1,5986] +DEBUG: predicate 
pruning for shardId 290003 +DEBUG: join prunable for intervals [8997,10560] and [1,5986] +DEBUG: join prunable for intervals [10560,12036] and [1,5986] +DEBUG: join prunable for intervals [12036,13473] and [1,5986] +DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -------+-------------------- 17996 | 3.0194630872483221 @@ -45,6 +49,8 @@ DEBUG: predicate pruning for shardId 290002 DEBUG: predicate pruning for shardId 290003 DEBUG: predicate pruning for shardId 290004 DEBUG: predicate pruning for shardId 290005 +DEBUG: predicate pruning for shardId 290006 +DEBUG: predicate pruning for shardId 290007 sum | avg -----+----- | @@ -58,10 +64,12 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders DEBUG: predicate pruning for shardId 290000 DEBUG: predicate pruning for shardId 290001 DEBUG: predicate pruning for shardId 290002 -DEBUG: predicate pruning for shardId 290007 -DEBUG: join prunable for intervals [8997,11554] and [1,5986] -DEBUG: join prunable for intervals [11554,13920] and [1,5986] -DEBUG: join prunable for intervals [13921,14947] and [1,5986] +DEBUG: predicate pruning for shardId 290003 +DEBUG: predicate pruning for shardId 290009 +DEBUG: join prunable for intervals [8997,10560] and [1,5986] +DEBUG: join prunable for intervals [10560,12036] and [1,5986] +DEBUG: join prunable for intervals [12036,13473] and [1,5986] +DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -----+----- | diff --git a/src/test/regress/expected/multi_large_table_join_planning.out b/src/test/regress/expected/multi_large_table_join_planning.out index d9a8f8f7b..b11ea9162 100644 --- a/src/test/regress/expected/multi_large_table_join_planning.out +++ b/src/test/regress/expected/multi_large_table_join_planning.out @@ -45,40 +45,48 @@ GROUP BY ORDER BY l_partkey, o_orderkey; DEBUG: StartTransactionCommand -DEBUG: join prunable for intervals [1,2496] and [8997,14946] -DEBUG: join prunable for intervals [2497,4964] and [8997,14946] -DEBUG: join prunable for intervals [4965,5986] and [8997,14946] -DEBUG: join prunable for intervals [8997,11554] and [1,5986] -DEBUG: join prunable for intervals [11554,13920] and [1,5986] -DEBUG: join prunable for intervals [13921,14947] and [1,5986] +DEBUG: join prunable for intervals [1,1509] and [8997,14946] +DEBUG: join prunable for intervals [1509,4964] and [8997,14946] +DEBUG: join prunable for intervals [2951,4455] and [8997,14946] +DEBUG: join prunable for intervals [4480,5986] and [8997,14946] +DEBUG: join prunable for intervals [8997,10560] and [1,5986] +DEBUG: join prunable for intervals [10560,12036] and [1,5986] +DEBUG: join prunable for intervals [12036,13473] and [1,5986] +DEBUG: join prunable for intervals [13473,14947] and [1,5986] DEBUG: generated sql query for job 1250 and task 3 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for job 1250 and task 6 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, 
lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for job 1250 and task 9 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for job 1250 and task 12 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for job 1250 and task 15 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for job 1250 and task 18 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" +DEBUG: generated sql query for job 1250 and task 21 +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) 
AND (orders.o_totalprice > 10.0))" +DEBUG: generated sql query for job 1250 and task 24 +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 9 to node localhost:57637 DEBUG: assigned task 12 to node localhost:57638 DEBUG: assigned task 15 to node localhost:57637 DEBUG: assigned task 18 to node localhost:57638 +DEBUG: assigned task 21 to node localhost:57637 +DEBUG: assigned task 24 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: generated sql query for job 1251 and task 3 -DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_290010 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" +DETAIL: query string: "SELECT "pg_merge_job_1250.task_000025".intermediate_column_1250_0, "pg_merge_job_1250.task_000025".intermediate_column_1250_1, "pg_merge_job_1250.task_000025".intermediate_column_1250_2, "pg_merge_job_1250.task_000025".intermediate_column_1250_3, "pg_merge_job_1250.task_000025".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000025 "pg_merge_job_1250.task_000025" JOIN part_290012 part ON (("pg_merge_job_1250.task_000025".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: generated sql query for job 1251 and task 6 -DETAIL: query string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_280002 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" +DETAIL: query string: "SELECT "pg_merge_job_1250.task_000034".intermediate_column_1250_0, "pg_merge_job_1250.task_000034".intermediate_column_1250_1, "pg_merge_job_1250.task_000034".intermediate_column_1250_2, "pg_merge_job_1250.task_000034".intermediate_column_1250_3, "pg_merge_job_1250.task_000034".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000034 "pg_merge_job_1250.task_000034" JOIN part_280002 part ON (("pg_merge_job_1250.task_000034".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 19 +DETAIL: Creating dependency on merge taskId 25 DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 26 +DETAIL: Creating dependency on merge taskId 34 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 3 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [1001,2000] @@ -88,7 +96,7 @@ 
DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: generated sql query for job 1252 and task 3 -DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290008 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1" +DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1" DEBUG: generated sql query for job 1252 and task 6 DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1" DEBUG: generated sql query for job 1252 and task 9 @@ -166,16 +174,22 @@ DEBUG: generated sql query for job 1253 and task 10 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 12 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)" +DEBUG: generated sql query for job 1253 and task 14 +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)" +DEBUG: generated sql query for job 1253 and task 16 +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)" DEBUG: assigned task 2 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57638 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 8 to node localhost:57638 DEBUG: assigned task 10 to node localhost:57637 DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task 14 to node localhost:57637 +DEBUG: assigned task 16 to node localhost:57638 DEBUG: generated sql query for job 1254 and task 2 -DETAIL: query string: "SELECT 
o_orderkey, o_shippriority FROM orders_290006 orders WHERE (o_totalprice <> 4.0)" +DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)" DEBUG: generated sql query for job 1254 and task 4 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290007 orders WHERE (o_totalprice <> 4.0)" +DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)" DEBUG: assigned task 2 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57638 DEBUG: join prunable for task partitionId 0 and 1 @@ -191,27 +205,27 @@ DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: generated sql query for job 1255 and task 3 -DETAIL: query string: "SELECT "pg_merge_job_1253.task_000013".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000005".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000013 "pg_merge_job_1253.task_000013" JOIN pg_merge_job_1254.task_000005 "pg_merge_job_1254.task_000005" ON (("pg_merge_job_1253.task_000013".intermediate_column_1253_1 = "pg_merge_job_1254.task_000005".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000013".intermediate_column_1253_0, "pg_merge_job_1254.task_000005".intermediate_column_1254_0" +DETAIL: query string: "SELECT "pg_merge_job_1253.task_000017".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000005".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000017 "pg_merge_job_1253.task_000017" JOIN pg_merge_job_1254.task_000005 "pg_merge_job_1254.task_000005" ON (("pg_merge_job_1253.task_000017".intermediate_column_1253_1 = "pg_merge_job_1254.task_000005".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000017".intermediate_column_1253_0, "pg_merge_job_1254.task_000005".intermediate_column_1254_0" DEBUG: generated sql query for job 1255 and task 6 -DETAIL: query string: "SELECT "pg_merge_job_1253.task_000020".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000008".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000020 "pg_merge_job_1253.task_000020" JOIN pg_merge_job_1254.task_000008 "pg_merge_job_1254.task_000008" ON (("pg_merge_job_1253.task_000020".intermediate_column_1253_1 = "pg_merge_job_1254.task_000008".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000020".intermediate_column_1253_0, "pg_merge_job_1254.task_000008".intermediate_column_1254_0" +DETAIL: query string: "SELECT "pg_merge_job_1253.task_000026".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000008".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000026 "pg_merge_job_1253.task_000026" JOIN pg_merge_job_1254.task_000008 "pg_merge_job_1254.task_000008" ON (("pg_merge_job_1253.task_000026".intermediate_column_1253_1 = "pg_merge_job_1254.task_000008".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000026".intermediate_column_1253_0, "pg_merge_job_1254.task_000008".intermediate_column_1254_0" DEBUG: generated sql query for job 1255 and task 9 -DETAIL: query string: "SELECT "pg_merge_job_1253.task_000027".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000011".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM 
(pg_merge_job_1253.task_000027 "pg_merge_job_1253.task_000027" JOIN pg_merge_job_1254.task_000011 "pg_merge_job_1254.task_000011" ON (("pg_merge_job_1253.task_000027".intermediate_column_1253_1 = "pg_merge_job_1254.task_000011".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000027".intermediate_column_1253_0, "pg_merge_job_1254.task_000011".intermediate_column_1254_0" +DETAIL: query string: "SELECT "pg_merge_job_1253.task_000035".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000011".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000035 "pg_merge_job_1253.task_000035" JOIN pg_merge_job_1254.task_000011 "pg_merge_job_1254.task_000011" ON (("pg_merge_job_1253.task_000035".intermediate_column_1253_1 = "pg_merge_job_1254.task_000011".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000035".intermediate_column_1253_0, "pg_merge_job_1254.task_000011".intermediate_column_1254_0" DEBUG: generated sql query for job 1255 and task 12 -DETAIL: query string: "SELECT "pg_merge_job_1253.task_000034".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000014".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000034 "pg_merge_job_1253.task_000034" JOIN pg_merge_job_1254.task_000014 "pg_merge_job_1254.task_000014" ON (("pg_merge_job_1253.task_000034".intermediate_column_1253_1 = "pg_merge_job_1254.task_000014".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000034".intermediate_column_1253_0, "pg_merge_job_1254.task_000014".intermediate_column_1254_0" +DETAIL: query string: "SELECT "pg_merge_job_1253.task_000044".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000014".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000044 "pg_merge_job_1253.task_000044" JOIN pg_merge_job_1254.task_000014 "pg_merge_job_1254.task_000014" ON (("pg_merge_job_1253.task_000044".intermediate_column_1253_1 = "pg_merge_job_1254.task_000014".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000044".intermediate_column_1253_0, "pg_merge_job_1254.task_000014".intermediate_column_1254_0" DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 13 +DETAIL: Creating dependency on merge taskId 17 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 20 +DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 -DETAIL: Creating dependency on merge taskId 27 +DETAIL: Creating dependency on merge taskId 35 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 34 +DETAIL: Creating dependency on merge taskId 44 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 14 DEBUG: assigned task 3 to node localhost:57638 diff --git a/src/test/regress/expected/multi_large_table_pruning.out b/src/test/regress/expected/multi_large_table_pruning.out index cb377cf5f..492261a6b 100644 --- a/src/test/regress/expected/multi_large_table_pruning.out +++ b/src/test/regress/expected/multi_large_table_pruning.out @@ -42,8 +42,8 @@ FROM WHERE o_custkey = c_custkey AND o_orderkey < 0; -DEBUG: predicate pruning for shardId 290006 
-DEBUG: predicate pruning for shardId 290007 +DEBUG: predicate pruning for shardId 290008 +DEBUG: predicate pruning for shardId 290009 count ------- @@ -58,7 +58,7 @@ FROM WHERE o_custkey = c_custkey AND c_custkey < 0; -DEBUG: predicate pruning for shardId 290008 +DEBUG: predicate pruning for shardId 290010 DEBUG: predicate pruning for shardId 280001 DEBUG: predicate pruning for shardId 280000 count @@ -88,19 +88,19 @@ DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 13 +DETAIL: Creating dependency on merge taskId 17 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 20 +DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 7 -DETAIL: Creating dependency on merge taskId 27 +DETAIL: Creating dependency on merge taskId 35 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 15 DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 34 +DETAIL: Creating dependency on merge taskId 44 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 19 count @@ -123,6 +123,8 @@ DEBUG: predicate pruning for shardId 290002 DEBUG: predicate pruning for shardId 290003 DEBUG: predicate pruning for shardId 290004 DEBUG: predicate pruning for shardId 290005 +DEBUG: predicate pruning for shardId 290006 +DEBUG: predicate pruning for shardId 290007 count ------- diff --git a/src/test/regress/expected/multi_large_table_task_assignment.out b/src/test/regress/expected/multi_large_table_task_assignment.out index c5a95c149..ed3c6d634 100644 --- a/src/test/regress/expected/multi_large_table_task_assignment.out +++ b/src/test/regress/expected/multi_large_table_task_assignment.out @@ -72,58 +72,110 @@ DEBUG: assigned task 18 to node localhost:57637 DEBUG: assigned task 12 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 -DEBUG: join prunable for intervals [1,2496] and [2497,4964] -DEBUG: join prunable for intervals [1,2496] and [4965,5986] -DEBUG: join prunable for intervals [1,2496] and [8997,11554] -DEBUG: join prunable for intervals [1,2496] and [11554,13920] -DEBUG: join prunable for intervals [1,2496] and [13921,14947] -DEBUG: join prunable for intervals [2497,4964] and [1,2496] -DEBUG: join prunable for intervals [2497,4964] and [4965,5986] -DEBUG: join prunable for intervals [2497,4964] and [8997,11554] -DEBUG: join prunable for intervals [2497,4964] and [11554,13920] -DEBUG: join prunable for intervals [2497,4964] and [13921,14947] -DEBUG: join prunable for intervals [4965,5986] and [1,2496] -DEBUG: join prunable for intervals [4965,5986] and [2497,4964] -DEBUG: join prunable for intervals [4965,5986] and [8997,11554] -DEBUG: join prunable for intervals [4965,5986] and [11554,13920] -DEBUG: join prunable for intervals [4965,5986] and [13921,14947] -DEBUG: join prunable for intervals [8997,11554] and [1,2496] -DEBUG: join prunable for intervals [8997,11554] and [2497,4964] -DEBUG: join prunable for intervals [8997,11554] and [4965,5986] -DEBUG: join prunable for intervals [8997,11554] and [13921,14947] -DEBUG: join prunable for intervals [11554,13920] and [1,2496] -DEBUG: 
join prunable for intervals [11554,13920] and [2497,4964] -DEBUG: join prunable for intervals [11554,13920] and [4965,5986] -DEBUG: join prunable for intervals [11554,13920] and [13921,14947] -DEBUG: join prunable for intervals [13921,14947] and [1,2496] -DEBUG: join prunable for intervals [13921,14947] and [2497,4964] -DEBUG: join prunable for intervals [13921,14947] and [4965,5986] -DEBUG: join prunable for intervals [13921,14947] and [8997,11554] -DEBUG: join prunable for intervals [13921,14947] and [11554,13920] +DEBUG: join prunable for intervals [1,1509] and [2951,4455] +DEBUG: join prunable for intervals [1,1509] and [4480,5986] +DEBUG: join prunable for intervals [1,1509] and [8997,10560] +DEBUG: join prunable for intervals [1,1509] and [10560,12036] +DEBUG: join prunable for intervals [1,1509] and [12036,13473] +DEBUG: join prunable for intervals [1,1509] and [13473,14947] +DEBUG: join prunable for intervals [1509,4964] and [8997,10560] +DEBUG: join prunable for intervals [1509,4964] and [10560,12036] +DEBUG: join prunable for intervals [1509,4964] and [12036,13473] +DEBUG: join prunable for intervals [1509,4964] and [13473,14947] +DEBUG: join prunable for intervals [2951,4455] and [1,1509] +DEBUG: join prunable for intervals [2951,4455] and [4480,5986] +DEBUG: join prunable for intervals [2951,4455] and [8997,10560] +DEBUG: join prunable for intervals [2951,4455] and [10560,12036] +DEBUG: join prunable for intervals [2951,4455] and [12036,13473] +DEBUG: join prunable for intervals [2951,4455] and [13473,14947] +DEBUG: join prunable for intervals [4480,5986] and [1,1509] +DEBUG: join prunable for intervals [4480,5986] and [2951,4455] +DEBUG: join prunable for intervals [4480,5986] and [8997,10560] +DEBUG: join prunable for intervals [4480,5986] and [10560,12036] +DEBUG: join prunable for intervals [4480,5986] and [12036,13473] +DEBUG: join prunable for intervals [4480,5986] and [13473,14947] +DEBUG: join prunable for intervals [8997,10560] and [1,1509] +DEBUG: join prunable for intervals [8997,10560] and [1509,4964] +DEBUG: join prunable for intervals [8997,10560] and [2951,4455] +DEBUG: join prunable for intervals [8997,10560] and [4480,5986] +DEBUG: join prunable for intervals [8997,10560] and [12036,13473] +DEBUG: join prunable for intervals [8997,10560] and [13473,14947] +DEBUG: join prunable for intervals [10560,12036] and [1,1509] +DEBUG: join prunable for intervals [10560,12036] and [1509,4964] +DEBUG: join prunable for intervals [10560,12036] and [2951,4455] +DEBUG: join prunable for intervals [10560,12036] and [4480,5986] +DEBUG: join prunable for intervals [10560,12036] and [13473,14947] +DEBUG: join prunable for intervals [12036,13473] and [1,1509] +DEBUG: join prunable for intervals [12036,13473] and [1509,4964] +DEBUG: join prunable for intervals [12036,13473] and [2951,4455] +DEBUG: join prunable for intervals [12036,13473] and [4480,5986] +DEBUG: join prunable for intervals [12036,13473] and [8997,10560] +DEBUG: join prunable for intervals [13473,14947] and [1,1509] +DEBUG: join prunable for intervals [13473,14947] and [1509,4964] +DEBUG: join prunable for intervals [13473,14947] and [2951,4455] +DEBUG: join prunable for intervals [13473,14947] and [4480,5986] +DEBUG: join prunable for intervals [13473,14947] and [8997,10560] +DEBUG: join prunable for intervals [13473,14947] and [10560,12036] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 19 DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 26 +DETAIL: 
Creating dependency on merge taskId 19 DEBUG: pruning merge fetch taskId 7 -DETAIL: Creating dependency on merge taskId 33 +DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 40 +DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 13 -DETAIL: Creating dependency on merge taskId 40 +DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 16 -DETAIL: Creating dependency on merge taskId 47 +DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 19 -DETAIL: Creating dependency on merge taskId 47 +DETAIL: Creating dependency on merge taskId 33 DEBUG: pruning merge fetch taskId 22 +DETAIL: Creating dependency on merge taskId 33 +DEBUG: pruning merge fetch taskId 25 +DETAIL: Creating dependency on merge taskId 40 +DEBUG: pruning merge fetch taskId 28 +DETAIL: Creating dependency on merge taskId 40 +DEBUG: pruning merge fetch taskId 31 +DETAIL: Creating dependency on merge taskId 47 +DEBUG: pruning merge fetch taskId 34 +DETAIL: Creating dependency on merge taskId 47 +DEBUG: pruning merge fetch taskId 37 DETAIL: Creating dependency on merge taskId 54 +DEBUG: pruning merge fetch taskId 40 +DETAIL: Creating dependency on merge taskId 54 +DEBUG: pruning merge fetch taskId 43 +DETAIL: Creating dependency on merge taskId 54 +DEBUG: pruning merge fetch taskId 46 +DETAIL: Creating dependency on merge taskId 61 +DEBUG: pruning merge fetch taskId 49 +DETAIL: Creating dependency on merge taskId 61 +DEBUG: pruning merge fetch taskId 52 +DETAIL: Creating dependency on merge taskId 61 +DEBUG: pruning merge fetch taskId 55 +DETAIL: Creating dependency on merge taskId 68 +DEBUG: pruning merge fetch taskId 58 +DETAIL: Creating dependency on merge taskId 68 DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 +DEBUG: assigned task 21 to node localhost:57638 DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 12 to node localhost:57638 -DEBUG: assigned task 18 to node localhost:57637 -DEBUG: assigned task 24 to node localhost:57638 -DEBUG: propagating assignment from merge task 40 to constrained sql task 15 -DEBUG: propagating assignment from merge task 47 to constrained sql task 21 +DEBUG: assigned task 27 to node localhost:57638 +DEBUG: assigned task 33 to node localhost:57637 +DEBUG: assigned task 48 to node localhost:57638 +DEBUG: assigned task 39 to node localhost:57637 +DEBUG: assigned task 57 to node localhost:57638 +DEBUG: propagating assignment from merge task 19 to constrained sql task 6 +DEBUG: propagating assignment from merge task 26 to constrained sql task 12 +DEBUG: propagating assignment from merge task 26 to constrained sql task 15 +DEBUG: propagating assignment from merge task 26 to constrained sql task 18 +DEBUG: propagating assignment from merge task 33 to constrained sql task 24 +DEBUG: propagating assignment from merge task 40 to constrained sql task 30 +DEBUG: propagating assignment from merge task 47 to constrained sql task 36 +DEBUG: propagating assignment from merge task 54 to constrained sql task 42 +DEBUG: propagating assignment from merge task 54 to constrained sql task 45 +DEBUG: propagating assignment from merge task 61 to constrained sql task 51 +DEBUG: propagating assignment from merge task 61 to constrained sql task 54 +DEBUG: propagating assignment from merge task 68 to constrained sql task 60 DEBUG: CommitTransactionCommand count ------- @@ -162,6 +214,8 @@ DEBUG: 
assigned task 6 to node localhost:57637 DEBUG: assigned task 8 to node localhost:57638 DEBUG: assigned task 10 to node localhost:57637 DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task 14 to node localhost:57637 +DEBUG: assigned task 16 to node localhost:57638 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57637 @@ -178,19 +232,19 @@ DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 13 +DETAIL: Creating dependency on merge taskId 17 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 20 +DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 7 -DETAIL: Creating dependency on merge taskId 27 +DETAIL: Creating dependency on merge taskId 35 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 15 DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 34 +DETAIL: Creating dependency on merge taskId 44 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 19 DEBUG: assigned task 3 to node localhost:57637 diff --git a/src/test/regress/expected/multi_limit_clause_approximate.out b/src/test/regress/expected/multi_limit_clause_approximate.out index 7008a52a8..ef90d54ec 100644 --- a/src/test/regress/expected/multi_limit_clause_approximate.out +++ b/src/test/regress/expected/multi_limit_clause_approximate.out @@ -41,8 +41,8 @@ DEBUG: push down of limit count: 600 153937 | 2761321906 199283 | 2726988572 185925 | 2672114100 + 196629 | 2622637602 157064 | 2614644408 - 189336 | 2596175232 (10 rows) -- Disable limit optimization for our second test. 
This time, we have a query @@ -81,15 +81,15 @@ DEBUG: push down of limit count: 150 c_custkey | c_name | lineitem_count -----------+--------------------+---------------- 43 | Customer#000000043 | 42 - 370 | Customer#000000370 | 36 + 370 | Customer#000000370 | 38 + 79 | Customer#000000079 | 37 689 | Customer#000000689 | 36 + 472 | Customer#000000472 | 35 + 685 | Customer#000000685 | 35 + 643 | Customer#000000643 | 34 226 | Customer#000000226 | 33 496 | Customer#000000496 | 32 - 685 | Customer#000000685 | 32 304 | Customer#000000304 | 31 - 472 | Customer#000000472 | 31 - 79 | Customer#000000079 | 30 - 145 | Customer#000000145 | 30 (10 rows) RESET citus.large_table_shard_count; diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning.out b/src/test/regress/expected/multi_null_minmax_value_pruning.out index fc3321652..0757254f7 100644 --- a/src/test/regress/expected/multi_null_minmax_value_pruning.out +++ b/src/test/regress/expected/multi_null_minmax_value_pruning.out @@ -11,13 +11,13 @@ SET citus.large_table_shard_count TO 2; SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; shardminvalue | shardmaxvalue ---------------+--------------- - 1 | 2496 + 1 | 1509 (1 row) SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; shardminvalue | shardmaxvalue ---------------+--------------- - 2497 | 4964 + 1509 | 2951 (1 row) -- Check that partition and join pruning works when min/max values exist @@ -25,8 +25,10 @@ SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; DEBUG: predicate pruning for shardId 290001 DEBUG: predicate pruning for shardId 290002 -DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290003 DEBUG: predicate pruning for shardId 290005 +DEBUG: predicate pruning for shardId 290006 +DEBUG: predicate pruning for shardId 290007 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ 1 | 1 | 03-13-1996 @@ -45,12 +47,14 @@ DEBUG: predicate pruning for shardId 290005 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; -DEBUG: join prunable for intervals [1,2496] and [8997,14946] -DEBUG: join prunable for intervals [2497,4964] and [8997,14946] -DEBUG: join prunable for intervals [4965,5986] and [8997,14946] -DEBUG: join prunable for intervals [8997,11554] and [1,5986] -DEBUG: join prunable for intervals [11554,13920] and [1,5986] -DEBUG: join prunable for intervals [13921,14947] and [1,5986] +DEBUG: join prunable for intervals [1,1509] and [8997,14946] +DEBUG: join prunable for intervals [1509,2951] and [8997,14946] +DEBUG: join prunable for intervals [2951,4455] and [8997,14946] +DEBUG: join prunable for intervals [4480,5986] and [8997,14946] +DEBUG: join prunable for intervals [8997,10560] and [1,5986] +DEBUG: join prunable for intervals [10560,12036] and [1,5986] +DEBUG: join prunable for intervals [12036,13473] and [1,5986] +DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -------+-------------------- 36086 | 3.0076679446574429 @@ -62,8 +66,10 @@ UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; DEBUG: predicate pruning for shardId 290001 DEBUG: predicate pruning for shardId 290002 -DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290003 
DEBUG: predicate pruning for shardId 290005 +DEBUG: predicate pruning for shardId 290006 +DEBUG: predicate pruning for shardId 290007 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ 9030 | 1 | 09-02-1998 @@ -76,11 +82,13 @@ DEBUG: predicate pruning for shardId 290005 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; -DEBUG: join prunable for intervals [2497,4964] and [8997,14946] -DEBUG: join prunable for intervals [4965,5986] and [8997,14946] -DEBUG: join prunable for intervals [8997,11554] and [1,5986] -DEBUG: join prunable for intervals [11554,13920] and [1,5986] -DEBUG: join prunable for intervals [13921,14947] and [1,5986] +DEBUG: join prunable for intervals [1509,2951] and [8997,14946] +DEBUG: join prunable for intervals [2951,4455] and [8997,14946] +DEBUG: join prunable for intervals [4480,5986] and [8997,14946] +DEBUG: join prunable for intervals [8997,10560] and [1,5986] +DEBUG: join prunable for intervals [10560,12036] and [1,5986] +DEBUG: join prunable for intervals [12036,13473] and [1,5986] +DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -------+-------------------- 36086 | 3.0076679446574429 @@ -91,8 +99,10 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986] UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; DEBUG: predicate pruning for shardId 290002 -DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290003 DEBUG: predicate pruning for shardId 290005 +DEBUG: predicate pruning for shardId 290006 +DEBUG: predicate pruning for shardId 290007 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ 9030 | 1 | 09-02-1998 @@ -105,10 +115,12 @@ DEBUG: predicate pruning for shardId 290005 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; -DEBUG: join prunable for intervals [4965,5986] and [8997,14946] -DEBUG: join prunable for intervals [8997,11554] and [1,5986] -DEBUG: join prunable for intervals [11554,13920] and [1,5986] -DEBUG: join prunable for intervals [13921,14947] and [1,5986] +DEBUG: join prunable for intervals [2951,4455] and [8997,14946] +DEBUG: join prunable for intervals [4480,5986] and [8997,14946] +DEBUG: join prunable for intervals [8997,10560] and [1,5986] +DEBUG: join prunable for intervals [10560,12036] and [1,5986] +DEBUG: join prunable for intervals [12036,13473] and [1,5986] +DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -------+-------------------- 36086 | 3.0076679446574429 @@ -120,8 +132,10 @@ UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; DEBUG: predicate pruning for shardId 290000 DEBUG: predicate pruning for shardId 290002 -DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290003 DEBUG: predicate pruning for shardId 290005 +DEBUG: predicate pruning for shardId 290006 +DEBUG: predicate pruning for shardId 290007 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ 9030 | 1 | 09-02-1998 @@ -134,11 +148,13 @@ DEBUG: predicate pruning for shardId 290005 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; -DEBUG: join prunable for intervals [0,2496] and [8997,14946] -DEBUG: join prunable for intervals [4965,5986] 
and [8997,14946] -DEBUG: join prunable for intervals [8997,11554] and [1,5986] -DEBUG: join prunable for intervals [11554,13920] and [1,5986] -DEBUG: join prunable for intervals [13921,14947] and [1,5986] +DEBUG: join prunable for intervals [0,1509] and [8997,14946] +DEBUG: join prunable for intervals [2951,4455] and [8997,14946] +DEBUG: join prunable for intervals [4480,5986] and [8997,14946] +DEBUG: join prunable for intervals [8997,10560] and [1,5986] +DEBUG: join prunable for intervals [10560,12036] and [1,5986] +DEBUG: join prunable for intervals [12036,13473] and [1,5986] +DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -------+-------------------- 36086 | 3.0076679446574429 diff --git a/src/test/regress/expected/multi_partition_pruning.out b/src/test/regress/expected/multi_partition_pruning.out index 3d305b0e7..446b67cfc 100644 --- a/src/test/regress/expected/multi_partition_pruning.out +++ b/src/test/regress/expected/multi_partition_pruning.out @@ -11,8 +11,10 @@ SET client_min_messages TO DEBUG2; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; DEBUG: predicate pruning for shardId 290001 DEBUG: predicate pruning for shardId 290002 -DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290003 DEBUG: predicate pruning for shardId 290005 +DEBUG: predicate pruning for shardId 290006 +DEBUG: predicate pruning for shardId 290007 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ 1 | 1 | 03-13-1996 @@ -38,6 +40,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 903 DEBUG: predicate pruning for shardId 290000 DEBUG: predicate pruning for shardId 290001 DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290003 sum | avg -------+-------------------- 17999 | 3.0189533713518953 @@ -45,7 +48,7 @@ DEBUG: predicate pruning for shardId 290002 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE (l_orderkey < 4000 OR l_orderkey > 9030); -DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290003 sum | avg -------+-------------------- 30184 | 3.0159872102318145 @@ -59,6 +62,8 @@ DEBUG: predicate pruning for shardId 290002 DEBUG: predicate pruning for shardId 290003 DEBUG: predicate pruning for shardId 290004 DEBUG: predicate pruning for shardId 290005 +DEBUG: predicate pruning for shardId 290006 +DEBUG: predicate pruning for shardId 290007 sum | avg -----+----- | diff --git a/src/test/regress/input/multi_agg_distinct.source b/src/test/regress/input/multi_agg_distinct.source index 30cf329a1..590a4b514 100644 --- a/src/test/regress/input/multi_agg_distinct.source +++ b/src/test/regress/input/multi_agg_distinct.source @@ -27,9 +27,19 @@ CREATE TABLE lineitem_range ( l_comment varchar(44) not null ); SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range'); +SELECT master_create_empty_shard('lineitem_range') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 +WHERE shardid = :new_shard_id; + +SELECT master_create_empty_shard('lineitem_range') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 +WHERE shardid = :new_shard_id; + SET citus.shard_max_size TO "500MB"; -\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\COPY lineitem_range FROM 
'@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' RESET citus.shard_max_size; -- Run aggregate(distinct) on partition column for range partitioned table diff --git a/src/test/regress/input/multi_agg_type_conversion.source b/src/test/regress/input/multi_agg_type_conversion.source index 3573a2c15..a21f57c64 100644 --- a/src/test/regress/input/multi_agg_type_conversion.source +++ b/src/test/regress/input/multi_agg_type_conversion.source @@ -24,7 +24,7 @@ CREATE TABLE aggregate_type ( interval_value interval not null); SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append'); -\STAGE aggregate_type FROM '@abs_srcdir@/data/agg_type.data' +\COPY aggregate_type FROM '@abs_srcdir@/data/agg_type.data' -- Test conversions using aggregates on floats and division diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index b0a552ce0..e8d341376 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -29,7 +29,7 @@ CREATE TABLE lineitem_alter ( l_comment varchar(44) not null ); SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); -\STAGE lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that we can add columns @@ -57,8 +57,8 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; --- \stage to verify that default values take effect -\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \COPY to verify that default values take effect +\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; @@ -71,16 +71,17 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL; -- Drop default so that NULLs will be inserted for this column ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; --- \stage should fail because it will try to insert NULLs for a NOT NULL column -\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \COPY should fail because it will try to insert NULLs for a NOT NULL column +-- Note, this operation will create a table on the workers but it won't be in the metadata +\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, 
l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that DROP NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; \d lineitem_alter --- \stage should succeed now -\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \COPY should succeed now +\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT count(*) from lineitem_alter; -- Verify that SET DATA TYPE works @@ -258,7 +259,8 @@ DROP TABLESPACE super_fast_ssd; SET citus.enable_ddl_propagation to true; SELECT master_apply_delete_command('DELETE FROM lineitem_alter'); DROP TABLE lineitem_alter; --- check that nothing's left over on workers +-- check that nothing's left over on workers, other than the leftover shard created +-- during the unsuccessful COPY \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; \c - - - :master_port diff --git a/src/test/regress/input/multi_append_table_to_shard.source b/src/test/regress/input/multi_append_table_to_shard.source index e843ff9f0..1348eabe9 100644 --- a/src/test/regress/input/multi_append_table_to_shard.source +++ b/src/test/regress/input/multi_append_table_to_shard.source @@ -32,12 +32,12 @@ SELECT master_create_worker_shards('multi_append_table_to_shard_right_hash', 1, -- Replicate 'left' table on both workers SELECT set_config('citus.shard_replication_factor', '2', false); -\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -- Place 'right' table only on the primary worker SELECT set_config('citus.shard_replication_factor', '1', false); -\STAGE multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' +\COPY multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' -- Reset shard replication factor to ensure tasks will be assigned to both workers SELECT set_config('citus.shard_replication_factor', '2', false); diff --git a/src/test/regress/input/multi_create_schema.source b/src/test/regress/input/multi_create_schema.source index b2818f5ab..22e21d8b0 100644 --- a/src/test/regress/input/multi_create_schema.source +++ b/src/test/regress/input/multi_create_schema.source @@ -11,6 +11,6 @@ CREATE TABLE nation ( n_comment varchar(152)); SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append'); -\STAGE tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\COPY tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' SELECT count(*) from tpch.nation; diff --git a/src/test/regress/input/multi_large_shardid.source b/src/test/regress/input/multi_large_shardid.source index 21f69a209..d26d4497e 100644 --- a/src/test/regress/input/multi_large_shardid.source +++ b/src/test/regress/input/multi_large_shardid.source @@ -13,8 +13,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000; -- Stage additional data to start using large 
shard identifiers. -\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- Query #1 from the TPC-H decision support benchmark. diff --git a/src/test/regress/input/multi_master_delete_protocol.source b/src/test/regress/input/multi_master_delete_protocol.source index 55ad68a3f..096c9034e 100644 --- a/src/test/regress/input/multi_master_delete_protocol.source +++ b/src/test/regress/input/multi_master_delete_protocol.source @@ -19,9 +19,9 @@ CREATE TABLE customer_delete_protocol ( c_comment varchar(117) not null); SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append'); -\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' +\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' +\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -- Testing master_apply_delete_command -- Check that we don't support conditions on columns other than partition key. diff --git a/src/test/regress/input/multi_outer_join.source b/src/test/regress/input/multi_outer_join.source index 3f4c89bd2..ca7e6ce6e 100644 --- a/src/test/regress/input/multi_outer_join.source +++ b/src/test/regress/input/multi_outer_join.source @@ -53,11 +53,11 @@ FROM multi_outer_join_left a LEFT JOIN multi_outer_join_third b ON (l_custkey = t_custkey); -- Left table is a large table -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -- Right table is a small table -\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' +\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -- Make sure we do not crash if one table has no shards SELECT @@ -71,7 +71,7 @@ FROM multi_outer_join_third a LEFT JOIN multi_outer_join_right b ON (r_custkey = t_custkey); -- Third table is a single shard table with all data -\STAGE multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' +\COPY multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' -- Regular outer join should return results for all rows SELECT @@ -150,7 +150,7 @@ FROM -- Turn the right table into a large table -\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- Shards do not have 1-1 matching. We should error here. 
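
Unlike \stage, each \copy into an append-distributed table appends a new shard per invocation (more than one if citus.shard_max_size is exceeded), which is why the reload above leaves the right table with shard intervals that no longer line up 1-to-1 with the left table's. A quick way to see the mismatch, sketched here as an illustrative query rather than part of the test:

-- Compare the shard intervals of the two tables; interval sets that do not
-- match 1-to-1 are what trigger the planner error exercised below.
SELECT logicalrelid::regclass AS tablename,
       shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid IN ('multi_outer_join_left'::regclass,
                       'multi_outer_join_right'::regclass)
ORDER BY tablename, shardid;
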
@@ -164,11 +164,11 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left'); SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right'); -- reload shards with 1-1 matching -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' +\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- multi_outer_join_third is a single shard table @@ -409,7 +409,7 @@ ORDER BY cnt DESC, l1.l_custkey DESC LIMIT 20; -- Add a shard to the left table that overlaps with multiple shards in the right -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -- All outer joins should error out diff --git a/src/test/regress/input/multi_stage_data.source b/src/test/regress/input/multi_stage_data.source index 213521a98..06c6af200 100644 --- a/src/test/regress/input/multi_stage_data.source +++ b/src/test/regress/input/multi_stage_data.source @@ -6,29 +6,16 @@ -- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement -- policy is left to the default value (round-robin) to test the common install case. 
- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000; +\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\COPY orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\COPY orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -\STAGE orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' -\STAGE orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' - -\STAGE customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\STAGE nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' -\STAGE part FROM '@abs_srcdir@/data/part.data' with delimiter '|' -\STAGE supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' - --- check that we error out if we try to stage into a hash partitioned table - -CREATE TABLE nation_hash_partitioned ( - n_nationkey integer not null, - n_name char(25) not null, - n_regionkey integer not null, - n_comment varchar(152)); -SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash'); - -\STAGE nation_hash_partitioned FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\COPY customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\COPY nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\COPY part FROM '@abs_srcdir@/data/part.data' with delimiter '|' +\COPY supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' \ No newline at end of file diff --git a/src/test/regress/input/multi_stage_large_records.source b/src/test/regress/input/multi_stage_large_records.source index 496a6d757..fa4716497 100644 --- a/src/test/regress/input/multi_stage_large_records.source +++ b/src/test/regress/input/multi_stage_large_records.source @@ -15,7 +15,7 @@ SET citus.shard_max_size TO "256kB"; CREATE TABLE large_records_table (data_id integer, data text); SELECT master_create_distributed_table('large_records_table', 'data_id', 'append'); -\STAGE large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' +\COPY large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class WHERE pg_class.oid=logicalrelid AND relname='large_records_table' diff --git a/src/test/regress/input/multi_stage_more_data.source b/src/test/regress/input/multi_stage_more_data.source index 665c8c20d..c9724a72a 100644 --- a/src/test/regress/input/multi_stage_more_data.source +++ b/src/test/regress/input/multi_stage_more_data.source @@ -11,6 +11,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000; -- staging causes the planner to consider customer and part tables as large, and -- evaluate plans where some of the underlying tables need to be repartitioned. 
-\STAGE customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\STAGE customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -\STAGE part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' +\COPY customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' +\COPY customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' +\COPY part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' diff --git a/src/test/regress/input/multi_subquery.source b/src/test/regress/input/multi_subquery.source index 52bbd7199..5dba2eb83 100644 --- a/src/test/regress/input/multi_subquery.source +++ b/src/test/regress/input/multi_subquery.source @@ -79,13 +79,33 @@ FROM -- Stage data to tables. +SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 +WHERE shardid = :new_shard_id; + +SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 +WHERE shardid = :new_shard_id; + +SELECT master_create_empty_shard('orders_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 +WHERE shardid = :new_shard_id; + +SELECT master_create_empty_shard('orders_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946 +WHERE shardid = :new_shard_id; + SET citus.shard_max_size TO "1MB"; -\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' -\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +\COPY orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\COPY orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -- Check that we error out if shard min/max values are not exactly same. @@ -280,10 +300,13 @@ SELECT max(l_orderkey) FROM ) z ) y; --- Load more data to one relation, then test if we error out because of different +-- Add one more shard to one relation, then test if we error out because of different -- shard counts for joining relations. 
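
The replacement pattern, used in the hunk just below and in the earlier multi_subquery hunks, leans on psql's \gset, which stores each output column of the preceding query into a psql variable named after the column. A minimal sketch of the mechanism, with an illustrative follow-up query that is not part of the test:

-- \gset captures the column aliased as new_shard_id into :new_shard_id.
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
-- The freshly created shard should have NULL bounds until the test sets
-- shardminvalue/shardmaxvalue by hand, as the hunk below does.
SELECT shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE shardid = :new_shard_id;
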
-\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +SELECT master_create_empty_shard('orders_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 15000, shardmaxvalue = 20000 +WHERE shardid = :new_shard_id; SELECT avg(unit_price) diff --git a/src/test/regress/output/multi_agg_distinct.source b/src/test/regress/output/multi_agg_distinct.source index ab1451235..7be00eeaf 100644 --- a/src/test/regress/output/multi_agg_distinct.source +++ b/src/test/regress/output/multi_agg_distinct.source @@ -27,9 +27,17 @@ SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range'); (1 row) +SELECT master_create_empty_shard('lineitem_range') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 +WHERE shardid = :new_shard_id; +SELECT master_create_empty_shard('lineitem_range') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 +WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "500MB"; -\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' RESET citus.shard_max_size; -- Run aggregate(distinct) on partition column for range partitioned table SELECT count(distinct l_orderkey) FROM lineitem_range; diff --git a/src/test/regress/output/multi_agg_type_conversion.source b/src/test/regress/output/multi_agg_type_conversion.source index 49f65752f..e7942e19c 100644 --- a/src/test/regress/output/multi_agg_type_conversion.source +++ b/src/test/regress/output/multi_agg_type_conversion.source @@ -41,7 +41,7 @@ SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append' (1 row) -\STAGE aggregate_type FROM '@abs_srcdir@/data/agg_type.data' +\COPY aggregate_type FROM '@abs_srcdir@/data/agg_type.data' -- Test conversions using aggregates on floats and division SELECT min(float_value), max(float_value), sum(float_value), count(float_value), avg(float_value) diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index ecd1c1ff8..ebad02622 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -30,7 +30,7 @@ SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append') (1 row) -\STAGE lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that we can add columns ALTER TABLE lineitem_alter ADD COLUMN float_column FLOAT; NOTICE: using one-phase commit for distributed DDL commands @@ -121,8 +121,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; --- \stage to verify that default values take effect -\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM 
'@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \COPY to verify that default values take effect +\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; float_column | count --------------+------- @@ -167,15 +167,11 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL; -- Drop default so that NULLs will be inserted for this column ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; --- \stage should fail because it will try to insert NULLs for a NOT NULL column -\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \COPY should fail because it will try to insert NULLs for a NOT NULL column +-- Note, this operation will create a table on the workers but it won't be in the metadata +\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' ERROR: null value in column "int_column2" violates not-null constraint -DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). -CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..." -ERROR: null value in column "int_column2" violates not-null constraint -DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). -CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..." -\stage: failed to replicate shard to enough replicas +DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). 
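
The failure is stock PostgreSQL behavior rather than anything Citus-specific, and the expected output now reports it once, since \copy stops at the first error while \stage apparently retried and reported it per shard placement. A minimal local sketch of the same failure mode, using a hypothetical table and data file that are not part of the suite:

-- A NOT NULL column with no default, mirroring int_column2 above.
CREATE TABLE copy_notnull_demo (l_orderkey bigint, int_column2 int NOT NULL);
-- Copying only the first column leaves int_column2 NULL, so every row fails:
-- ERROR:  null value in column "int_column2" violates not-null constraint
\copy copy_notnull_demo (l_orderkey) FROM 'orderkeys.data'
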
-- Verify that DROP NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; \d lineitem_alter @@ -204,8 +200,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; int_column2 | integer | null_column | integer | --- \stage should succeed now -\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \COPY should succeed now +\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT count(*) from lineitem_alter; count ------- @@ -474,7 +470,7 @@ SELECT master_create_worker_shards('test_ab', 8, 2); INSERT INTO test_ab VALUES (2, 10); INSERT INTO test_ab VALUES (2, 11); CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a); -WARNING: could not create unique index "temp_unique_index_1_220016" +WARNING: could not create unique index "temp_unique_index_1_220021" DETAIL: Key (a)=(2) is duplicated. CONTEXT: while executing command on localhost:57638 ERROR: could not execute DDL command on worker node shards @@ -605,15 +601,17 @@ SET citus.enable_ddl_propagation to true; SELECT master_apply_delete_command('DELETE FROM lineitem_alter'); master_apply_delete_command ----------------------------- - 9 + 14 (1 row) DROP TABLE lineitem_alter; --- check that nothing's left over on workers +-- check that nothing's left over on workers, other than the leftover shard created +-- during the unsuccessful COPY \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; - relname ---------- -(0 rows) + relname +----------------------- + lineitem_alter_220009 +(1 row) \c - - - :master_port diff --git a/src/test/regress/output/multi_append_table_to_shard.source b/src/test/regress/output/multi_append_table_to_shard.source index c0b23b9d8..c63dd3155 100644 --- a/src/test/regress/output/multi_append_table_to_shard.source +++ b/src/test/regress/output/multi_append_table_to_shard.source @@ -50,8 +50,8 @@ SELECT set_config('citus.shard_replication_factor', '2', false); 2 (1 row) -\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -\STAGE multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -- Place 'right' table only on the primary worker SELECT set_config('citus.shard_replication_factor', '1', false); set_config @@ -59,7 +59,7 @@ SELECT set_config('citus.shard_replication_factor', '1', false); 1 (1 row) -\STAGE multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' +\COPY multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' -- Reset shard replication factor to ensure tasks will be assigned to both workers SELECT set_config('citus.shard_replication_factor', '2', false); set_config diff --git a/src/test/regress/output/multi_create_schema.source b/src/test/regress/output/multi_create_schema.source index 2b2278a36..038d9239f 100644 --- a/src/test/regress/output/multi_create_schema.source +++ b/src/test/regress/output/multi_create_schema.source @@ -12,7 +12,7 @@ SELECT 
master_create_distributed_table('tpch.nation', 'n_nationkey', 'append'); (1 row) -\STAGE tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\COPY tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' SELECT count(*) from tpch.nation; count ------- diff --git a/src/test/regress/output/multi_large_shardid.source b/src/test/regress/output/multi_large_shardid.source index 684732ee6..3a7c22329 100644 --- a/src/test/regress/output/multi_large_shardid.source +++ b/src/test/regress/output/multi_large_shardid.source @@ -7,8 +7,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000; -- Stage additional data to start using large shard identifiers. -\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- Query #1 from the TPC-H decision support benchmark. SELECT l_returnflag, diff --git a/src/test/regress/output/multi_master_delete_protocol.source b/src/test/regress/output/multi_master_delete_protocol.source index 15c9ee819..f59722e80 100644 --- a/src/test/regress/output/multi_master_delete_protocol.source +++ b/src/test/regress/output/multi_master_delete_protocol.source @@ -19,9 +19,9 @@ SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', (1 row) -\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' +\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' +\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -- Testing master_apply_delete_command -- Check that we don't support conditions on columns other than partition key. 
SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol diff --git a/src/test/regress/output/multi_outer_join.source b/src/test/regress/output/multi_outer_join.source index 089e2a134..b3f45d825 100644 --- a/src/test/regress/output/multi_outer_join.source +++ b/src/test/regress/output/multi_outer_join.source @@ -62,10 +62,10 @@ FROM ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning -- Left table is a large table -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -- Right table is a small table -\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' +\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -- Make sure we do not crash if one table has no shards SELECT min(l_custkey), max(l_custkey) @@ -84,7 +84,7 @@ LOG: join order: [ "multi_outer_join_third" ][ broadcast join "multi_outer_join (1 row) -- Third table is a single shard table with all data -\STAGE multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' +\COPY multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' -- Regular outer join should return results for all rows SELECT min(l_custkey), max(l_custkey) @@ -202,7 +202,7 @@ LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_ (1 row) -- Turn the right table into a large table -\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- Shards do not have 1-1 matching. We should error here. 
SELECT min(l_custkey), max(l_custkey) @@ -224,10 +224,10 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right'); (1 row) -- reload shards with 1-1 matching -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -\STAGE multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' +\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- multi_outer_join_third is a single shard table -- Regular left join should work as expected SELECT @@ -754,7 +754,7 @@ LIMIT 20; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries in outer joins are not supported -- Add a shard to the left table that overlaps with multiple shards in the right -\STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -- All outer joins should error out SELECT min(l_custkey), max(l_custkey) diff --git a/src/test/regress/output/multi_stage_data.source b/src/test/regress/output/multi_stage_data.source index b322feb76..ca47fbb88 100644 --- a/src/test/regress/output/multi_stage_data.source +++ b/src/test/regress/output/multi_stage_data.source @@ -7,25 +7,11 @@ -- policy is left to the default value (round-robin) to test the common install case. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000; -\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -\STAGE orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' -\STAGE orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -\STAGE customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\STAGE nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' -\STAGE part FROM '@abs_srcdir@/data/part.data' with delimiter '|' -\STAGE supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' --- check that we error out if we try to stage into a hash partitioned table -CREATE TABLE nation_hash_partitioned ( - n_nationkey integer not null, - n_name char(25) not null, - n_regionkey integer not null, - n_comment varchar(152)); -SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -\STAGE nation_hash_partitioned FROM '@abs_srcdir@/data/nation.data' with delimiter '|' -\stage: staging data into hash partitioned tables is not supported +\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\COPY orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\COPY orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +\COPY customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\COPY nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\COPY part FROM '@abs_srcdir@/data/part.data' with delimiter '|' +\COPY supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' diff --git a/src/test/regress/output/multi_stage_large_records.source b/src/test/regress/output/multi_stage_large_records.source index ef707850c..e40e2ee71 100644 --- a/src/test/regress/output/multi_stage_large_records.source +++ b/src/test/regress/output/multi_stage_large_records.source @@ -14,7 +14,7 @@ SELECT master_create_distributed_table('large_records_table', 'data_id', 'append (1 row) -\STAGE large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' +\COPY large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class WHERE pg_class.oid=logicalrelid AND relname='large_records_table' ORDER BY shardid; diff --git a/src/test/regress/output/multi_stage_more_data.source b/src/test/regress/output/multi_stage_more_data.source index 3c2c5eba5..b1f51c726 100644 --- a/src/test/regress/output/multi_stage_more_data.source +++ b/src/test/regress/output/multi_stage_more_data.source @@ -6,6 +6,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000; -- We stage more data to customer and part tables to test distributed joins. The -- staging causes the planner to consider customer and part tables as large, and -- evaluate plans where some of the underlying tables need to be repartitioned. 
-\STAGE customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\STAGE customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -\STAGE part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' +\COPY customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' +\COPY customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' +\COPY part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' diff --git a/src/test/regress/output/multi_subquery.source b/src/test/regress/output/multi_subquery.source index 15ee33f2e..96d8544d6 100644 --- a/src/test/regress/output/multi_subquery.source +++ b/src/test/regress/output/multi_subquery.source @@ -83,11 +83,27 @@ FROM (1 row) -- Stage data to tables. +SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 +WHERE shardid = :new_shard_id; +SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 +WHERE shardid = :new_shard_id; +SELECT master_create_empty_shard('orders_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 +WHERE shardid = :new_shard_id; +SELECT master_create_empty_shard('orders_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946 +WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "1MB"; -\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\STAGE lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' -\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\COPY orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\COPY orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -- Check that we error out if shard min/max values are not exactly same. SELECT avg(unit_price) @@ -310,9 +326,12 @@ SELECT max(l_orderkey) FROM 14947 (1 row) --- Load more data to one relation, then test if we error out because of different +-- Add one more shard to one relation, then test if we error out because of different -- shard counts for joining relations. 
-\STAGE orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +SELECT master_create_empty_shard('orders_subquery') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 15000, shardmaxvalue = 20000 +WHERE shardid = :new_shard_id; SELECT avg(unit_price) FROM From 032291670095bbe59c792ba234e109dee74dddf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20Ba=C5=9Fak?= Date: Thu, 11 Aug 2016 11:51:53 +0300 Subject: [PATCH 2/5] Lowercase \copy to match PostgreSQL's style for local/psql-level functions --- src/bin/scripts/copy_to_distributed_table | 2 +- .../multi_agg_approximate_distinct.out | 2 +- .../regress/expected/multi_schema_support.out | 20 +++++++++---------- .../regress/input/multi_agg_distinct.source | 8 ++++---- .../input/multi_agg_type_conversion.source | 2 +- .../input/multi_alter_table_statements.source | 14 ++++++------- .../input/multi_append_table_to_shard.source | 6 +++--- .../input/multi_complex_count_distinct.source | 4 ++-- src/test/regress/input/multi_copy.source | 2 +- .../regress/input/multi_create_schema.source | 2 +- .../regress/input/multi_large_shardid.source | 4 ++-- .../input/multi_master_delete_protocol.source | 6 +++--- .../regress/input/multi_outer_join.source | 20 +++++++++---------- .../regress/input/multi_stage_data.source | 16 +++++++-------- .../input/multi_stage_large_records.source | 2 +- .../input/multi_stage_more_data.source | 6 +++--- src/test/regress/input/multi_subquery.source | 8 ++++---- .../regress/output/multi_agg_distinct.source | 8 ++++---- .../output/multi_agg_type_conversion.source | 2 +- .../multi_alter_table_statements.source | 14 ++++++------- .../output/multi_append_table_to_shard.source | 6 +++--- .../multi_complex_count_distinct.source | 4 ++-- src/test/regress/output/multi_copy.source | 2 +- .../regress/output/multi_create_schema.source | 2 +- .../regress/output/multi_large_shardid.source | 4 ++-- .../multi_master_delete_protocol.source | 6 +++--- .../regress/output/multi_outer_join.source | 20 +++++++++---------- .../regress/output/multi_stage_data.source | 16 +++++++-------- .../output/multi_stage_large_records.source | 2 +- .../output/multi_stage_more_data.source | 6 +++--- src/test/regress/output/multi_subquery.source | 8 ++++---- .../sql/multi_agg_approximate_distinct.sql | 2 +- src/test/regress/sql/multi_schema_support.sql | 20 +++++++++---------- 33 files changed, 123 insertions(+), 123 deletions(-) diff --git a/src/bin/scripts/copy_to_distributed_table b/src/bin/scripts/copy_to_distributed_table index 3da5e8629..50994505d 100755 --- a/src/bin/scripts/copy_to_distributed_table +++ b/src/bin/scripts/copy_to_distributed_table @@ -1,6 +1,6 @@ #!/usr/bin/env bash echo "WARNING: copy_to_distributed_table is now deprecated." >&2 -echo "HINT: You can use \\COPY on distributed tables, which is a lot faster." >&2 +echo "HINT: You can use \\copy on distributed tables, which is a lot faster." 
>&2 # make bash behave set -euo pipefail diff --git a/src/test/regress/expected/multi_agg_approximate_distinct.out b/src/test/regress/expected/multi_agg_approximate_distinct.out index 57b27ee1e..4249d8414 100644 --- a/src/test/regress/expected/multi_agg_approximate_distinct.out +++ b/src/test/regress/expected/multi_agg_approximate_distinct.out @@ -123,7 +123,7 @@ SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, (1 row) -\COPY test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; +\copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; SET search_path TO public; SET citus.count_distinct_error_rate TO 0.01; SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash; diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index a02bd02fd..536178675 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -13,7 +13,7 @@ CREATE TABLE public.nation_local( n_regionkey integer not null, n_comment varchar(152) ); -\COPY public.nation_local FROM STDIN with delimiter '|'; +\copy public.nation_local FROM STDIN with delimiter '|'; CREATE TABLE test_schema_support.nation_append( n_nationkey integer not null, n_name char(25) not null, @@ -122,7 +122,7 @@ SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey (1 row) -\COPY nation_append_search_path FROM STDIN with delimiter '|'; +\copy nation_append_search_path FROM STDIN with delimiter '|'; -- create shard with master_create_worker_shards CREATE TABLE test_schema_support.nation_hash( n_nationkey integer not null, @@ -192,7 +192,7 @@ SELECT * FROM nation_hash WHERE n_nationkey = 7; -- test UDFs with schemas SET search_path TO public; -\COPY test_schema_support.nation_hash FROM STDIN with delimiter '|'; +\copy test_schema_support.nation_hash FROM STDIN with delimiter '|'; -- create UDF in master node CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) RETURNS text AS @@ -446,7 +446,7 @@ SELECT master_create_worker_shards('test_schema_support.nation_hash_collation', (1 row) -\COPY test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; +\copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_collation; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- @@ -489,7 +489,7 @@ SELECT master_create_worker_shards('nation_hash_collation_search_path', 4, 2); (1 row) -\COPY nation_hash_collation_search_path FROM STDIN with delimiter '|'; +\copy nation_hash_collation_search_path FROM STDIN with delimiter '|'; SELECT * FROM nation_hash_collation_search_path; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- @@ -542,7 +542,7 @@ SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_ty (1 row) -- insert some data to verify composite type queries -\COPY test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; +\copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = 
'(a,a)'::test_schema_support.new_composite_type; n_nationkey | n_name | n_regionkey | n_comment | test_col -------------+---------------------------+-------------+----------------------------------------------------+---------- @@ -829,7 +829,7 @@ SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_appen \c - - - :master_port -- test with search_path is set SET search_path TO test_schema_support; -\COPY nation_append FROM STDIN with delimiter '|'; +\copy nation_append FROM STDIN with delimiter '|'; SELECT master_apply_delete_command('DELETE FROM nation_append') ; master_apply_delete_command ----------------------------- @@ -873,7 +873,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4, (1 row) -\COPY test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; +\copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- @@ -886,7 +886,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4 (1 row) -\COPY test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; +\copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; SELECT master_create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- @@ -899,7 +899,7 @@ SELECT master_create_worker_shards('test_schema_support_join_2.nation_hash', 4, (1 row) -\COPY test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; +\copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column diff --git a/src/test/regress/input/multi_agg_distinct.source b/src/test/regress/input/multi_agg_distinct.source index 590a4b514..ec6f525ed 100644 --- a/src/test/regress/input/multi_agg_distinct.source +++ b/src/test/regress/input/multi_agg_distinct.source @@ -38,8 +38,8 @@ UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "500MB"; -\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' RESET citus.shard_max_size; -- Run aggregate(distinct) on partition column for range partitioned table @@ -99,8 +99,8 @@ CREATE TABLE lineitem_hash ( SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); SELECT master_create_worker_shards('lineitem_hash', 4, 1); -\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- aggregate(distinct) on partition column is allowed diff --git a/src/test/regress/input/multi_agg_type_conversion.source b/src/test/regress/input/multi_agg_type_conversion.source index a21f57c64..8ef9d2b94 100644 --- a/src/test/regress/input/multi_agg_type_conversion.source +++ 
b/src/test/regress/input/multi_agg_type_conversion.source @@ -24,7 +24,7 @@ CREATE TABLE aggregate_type ( interval_value interval not null); SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append'); -\COPY aggregate_type FROM '@abs_srcdir@/data/agg_type.data' +\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' -- Test conversions using aggregates on floats and division diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index e8d341376..d0bb6f605 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -29,7 +29,7 @@ CREATE TABLE lineitem_alter ( l_comment varchar(44) not null ); SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); -\COPY lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that we can add columns @@ -57,8 +57,8 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; --- \COPY to verify that default values take effect -\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \copy to verify that default values take effect +\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; @@ -71,17 +71,17 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL; -- Drop default so that NULLs will be inserted for this column ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; --- \COPY should fail because it will try to insert NULLs for a NOT NULL column +-- \copy should fail because it will try to insert NULLs for a NOT NULL column -- Note, this operation will create a table on the workers but it won't be in the metadata -\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that DROP NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; \d lineitem_alter --- \COPY should succeed now -\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM 
'@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \copy should succeed now +\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT count(*) from lineitem_alter; -- Verify that SET DATA TYPE works diff --git a/src/test/regress/input/multi_append_table_to_shard.source b/src/test/regress/input/multi_append_table_to_shard.source index 1348eabe9..fef9e8488 100644 --- a/src/test/regress/input/multi_append_table_to_shard.source +++ b/src/test/regress/input/multi_append_table_to_shard.source @@ -32,12 +32,12 @@ SELECT master_create_worker_shards('multi_append_table_to_shard_right_hash', 1, -- Replicate 'left' table on both workers SELECT set_config('citus.shard_replication_factor', '2', false); -\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -- Place 'right' table only on the primary worker SELECT set_config('citus.shard_replication_factor', '1', false); -\COPY multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' +\copy multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' -- Reset shard replication factor to ensure tasks will be assigned to both workers SELECT set_config('citus.shard_replication_factor', '2', false); diff --git a/src/test/regress/input/multi_complex_count_distinct.source b/src/test/regress/input/multi_complex_count_distinct.source index edb6c68e4..3691a7472 100644 --- a/src/test/regress/input/multi_complex_count_distinct.source +++ b/src/test/regress/input/multi_complex_count_distinct.source @@ -29,8 +29,8 @@ CREATE TABLE lineitem_hash ( SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); SELECT master_create_worker_shards('lineitem_hash', 8, 1); -\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' SET citus.task_executor_type to "task-tracker"; diff --git a/src/test/regress/input/multi_copy.source b/src/test/regress/input/multi_copy.source index da6507b44..53a08d21f 100644 --- a/src/test/regress/input/multi_copy.source +++ b/src/test/regress/input/multi_copy.source @@ -105,7 +105,7 @@ COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.2.data' WITH (DELIMITER SELECT count(*) FROM customer_copy_hash; -- Test client-side copy from file -\COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|'); +\copy customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|'); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash; diff --git a/src/test/regress/input/multi_create_schema.source b/src/test/regress/input/multi_create_schema.source index 22e21d8b0..ece82d4bf 100644 --- a/src/test/regress/input/multi_create_schema.source +++ b/src/test/regress/input/multi_create_schema.source @@ -11,6 +11,6 @@ CREATE TABLE nation ( n_comment varchar(152)); SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 
'append'); -\COPY tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' SELECT count(*) from tpch.nation; diff --git a/src/test/regress/input/multi_large_shardid.source b/src/test/regress/input/multi_large_shardid.source index d26d4497e..53767e90c 100644 --- a/src/test/regress/input/multi_large_shardid.source +++ b/src/test/regress/input/multi_large_shardid.source @@ -13,8 +13,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000; -- Stage additional data to start using large shard identifiers. -\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- Query #1 from the TPC-H decision support benchmark. diff --git a/src/test/regress/input/multi_master_delete_protocol.source b/src/test/regress/input/multi_master_delete_protocol.source index 096c9034e..4160102e6 100644 --- a/src/test/regress/input/multi_master_delete_protocol.source +++ b/src/test/regress/input/multi_master_delete_protocol.source @@ -19,9 +19,9 @@ CREATE TABLE customer_delete_protocol ( c_comment varchar(117) not null); SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append'); -\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' +\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' +\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -- Testing master_apply_delete_command -- Check that we don't support conditions on columns other than partition key. 
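For context on the \COPY-to-\copy substitutions above: psql accepts either spelling (the pre-patch \COPY lines ran successfully in these same tests), so the case change is purely stylistic; the substantive distinction is between psql's client-side \copy and server-side COPY. A minimal sketch of the two forms follows; the relative path and the server path are illustrative, not taken from this patch:

-- Client-side: psql opens the file itself and streams it to the server as
-- COPY ... FROM STDIN, so the path is resolved on the machine running psql.
\copy customer_delete_protocol FROM 'data/customer.1.data' with delimiter '|'

-- Server-side: the PostgreSQL backend opens the file, so the path must be
-- visible to (and readable by) the database server process.
COPY customer_delete_protocol FROM '/path/on/server/customer.1.data' WITH (DELIMITER '|');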
diff --git a/src/test/regress/input/multi_outer_join.source b/src/test/regress/input/multi_outer_join.source index ca7e6ce6e..55d480812 100644 --- a/src/test/regress/input/multi_outer_join.source +++ b/src/test/regress/input/multi_outer_join.source @@ -53,11 +53,11 @@ FROM multi_outer_join_left a LEFT JOIN multi_outer_join_third b ON (l_custkey = t_custkey); -- Left table is a large table -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -- Right table is a small table -\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' +\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -- Make sure we do not crash if one table has no shards SELECT @@ -71,7 +71,7 @@ FROM multi_outer_join_third a LEFT JOIN multi_outer_join_right b ON (r_custkey = t_custkey); -- Third table is a single shard table with all data -\COPY multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' +\copy multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' -- Regular outer join should return results for all rows SELECT @@ -150,7 +150,7 @@ FROM -- Turn the right table into a large table -\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- Shards do not have 1-1 matching. We should error here. @@ -164,11 +164,11 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left'); SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right'); -- reload shards with 1-1 matching -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' +\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- multi_outer_join_third is a single shard table @@ -409,7 +409,7 @@ ORDER BY cnt DESC, l1.l_custkey DESC LIMIT 20; -- Add a shard to the left table that overlaps with multiple shards in the right -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -- All outer joins should error out diff --git a/src/test/regress/input/multi_stage_data.source b/src/test/regress/input/multi_stage_data.source index 06c6af200..850b4edef 100644 --- a/src/test/regress/input/multi_stage_data.source +++ b/src/test/regress/input/multi_stage_data.source @@ -9,13 +9,13 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000; -\COPY lineitem 
FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -\COPY orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' -\COPY orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -\COPY customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\COPY nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' -\COPY part FROM '@abs_srcdir@/data/part.data' with delimiter '|' -\COPY supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' \ No newline at end of file +\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|' +\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' \ No newline at end of file diff --git a/src/test/regress/input/multi_stage_large_records.source b/src/test/regress/input/multi_stage_large_records.source index fa4716497..18c73b55e 100644 --- a/src/test/regress/input/multi_stage_large_records.source +++ b/src/test/regress/input/multi_stage_large_records.source @@ -15,7 +15,7 @@ SET citus.shard_max_size TO "256kB"; CREATE TABLE large_records_table (data_id integer, data text); SELECT master_create_distributed_table('large_records_table', 'data_id', 'append'); -\COPY large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' +\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class WHERE pg_class.oid=logicalrelid AND relname='large_records_table' diff --git a/src/test/regress/input/multi_stage_more_data.source b/src/test/regress/input/multi_stage_more_data.source index c9724a72a..651ce2625 100644 --- a/src/test/regress/input/multi_stage_more_data.source +++ b/src/test/regress/input/multi_stage_more_data.source @@ -11,6 +11,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000; -- staging causes the planner to consider customer and part tables as large, and -- evaluate plans where some of the underlying tables need to be repartitioned. 
-\COPY customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\COPY customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -\COPY part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' +\copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' +\copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' +\copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' diff --git a/src/test/regress/input/multi_subquery.source b/src/test/regress/input/multi_subquery.source index 5dba2eb83..5976e2dff 100644 --- a/src/test/regress/input/multi_subquery.source +++ b/src/test/regress/input/multi_subquery.source @@ -101,11 +101,11 @@ WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "1MB"; -\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -\COPY orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' -\COPY orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -- Check that we error out if shard min/max values are not exactly same. diff --git a/src/test/regress/output/multi_agg_distinct.source b/src/test/regress/output/multi_agg_distinct.source index 7be00eeaf..cb75d8f23 100644 --- a/src/test/regress/output/multi_agg_distinct.source +++ b/src/test/regress/output/multi_agg_distinct.source @@ -36,8 +36,8 @@ SELECT master_create_empty_shard('lineitem_range') AS new_shard_id UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "500MB"; -\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' RESET citus.shard_max_size; -- Run aggregate(distinct) on partition column for range partitioned table SELECT count(distinct l_orderkey) FROM lineitem_range; @@ -139,8 +139,8 @@ SELECT master_create_worker_shards('lineitem_hash', 4, 1); (1 row) -\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- aggregate(distinct) on partition column is allowed SELECT count(distinct l_orderkey) FROM lineitem_hash; count diff --git a/src/test/regress/output/multi_agg_type_conversion.source b/src/test/regress/output/multi_agg_type_conversion.source index e7942e19c..1ba41083b 100644 --- a/src/test/regress/output/multi_agg_type_conversion.source +++ b/src/test/regress/output/multi_agg_type_conversion.source @@ -41,7 +41,7 @@ SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append' (1 row) -\COPY aggregate_type FROM '@abs_srcdir@/data/agg_type.data' +\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' -- Test conversions using 
aggregates on floats and division SELECT min(float_value), max(float_value), sum(float_value), count(float_value), avg(float_value) diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index ebad02622..4231a6cac 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -30,7 +30,7 @@ SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append') (1 row) -\COPY lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that we can add columns ALTER TABLE lineitem_alter ADD COLUMN float_column FLOAT; NOTICE: using one-phase commit for distributed DDL commands @@ -121,8 +121,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; --- \COPY to verify that default values take effect -\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \copy to verify that default values take effect +\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; float_column | count --------------+------- @@ -167,9 +167,9 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL; -- Drop default so that NULLs will be inserted for this column ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; --- \COPY should fail because it will try to insert NULLs for a NOT NULL column +-- \copy should fail because it will try to insert NULLs for a NOT NULL column -- Note, this operation will create a table on the workers but it won't be in the metadata -\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' ERROR: null value in column "int_column2" violates not-null constraint DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). 
-- Verify that DROP NOT NULL works @@ -200,8 +200,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; int_column2 | integer | null_column | integer | --- \COPY should succeed now -\COPY lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- \copy should succeed now +\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT count(*) from lineitem_alter; count ------- diff --git a/src/test/regress/output/multi_append_table_to_shard.source b/src/test/regress/output/multi_append_table_to_shard.source index c63dd3155..c04a9563d 100644 --- a/src/test/regress/output/multi_append_table_to_shard.source +++ b/src/test/regress/output/multi_append_table_to_shard.source @@ -50,8 +50,8 @@ SELECT set_config('citus.shard_replication_factor', '2', false); 2 (1 row) -\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -\COPY multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -- Place 'right' table only on the primary worker SELECT set_config('citus.shard_replication_factor', '1', false); set_config @@ -59,7 +59,7 @@ SELECT set_config('citus.shard_replication_factor', '1', false); 1 (1 row) -\COPY multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' +\copy multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' -- Reset shard replication factor to ensure tasks will be assigned to both workers SELECT set_config('citus.shard_replication_factor', '2', false); set_config diff --git a/src/test/regress/output/multi_complex_count_distinct.source b/src/test/regress/output/multi_complex_count_distinct.source index 08ab725da..1e350f73e 100644 --- a/src/test/regress/output/multi_complex_count_distinct.source +++ b/src/test/regress/output/multi_complex_count_distinct.source @@ -34,8 +34,8 @@ SELECT master_create_worker_shards('lineitem_hash', 8, 1); (1 row) -\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' SET citus.task_executor_type to "task-tracker"; -- count(distinct) is supported on top level query if there -- is a grouping on the partition key diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index 6f70e54b0..f65904974 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -118,7 +118,7 @@ SELECT count(*) FROM customer_copy_hash; (1 row) -- Test client-side copy from file -\COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|'); +\copy customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|'); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash; count diff --git 
a/src/test/regress/output/multi_create_schema.source b/src/test/regress/output/multi_create_schema.source index 038d9239f..8826f431a 100644 --- a/src/test/regress/output/multi_create_schema.source +++ b/src/test/regress/output/multi_create_schema.source @@ -12,7 +12,7 @@ SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append'); (1 row) -\COPY tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' SELECT count(*) from tpch.nation; count ------- diff --git a/src/test/regress/output/multi_large_shardid.source b/src/test/regress/output/multi_large_shardid.source index 3a7c22329..ac291ca78 100644 --- a/src/test/regress/output/multi_large_shardid.source +++ b/src/test/regress/output/multi_large_shardid.source @@ -7,8 +7,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000; -- Stage additional data to start using large shard identifiers. -\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- Query #1 from the TPC-H decision support benchmark. SELECT l_returnflag, diff --git a/src/test/regress/output/multi_master_delete_protocol.source b/src/test/regress/output/multi_master_delete_protocol.source index f59722e80..a1499b273 100644 --- a/src/test/regress/output/multi_master_delete_protocol.source +++ b/src/test/regress/output/multi_master_delete_protocol.source @@ -19,9 +19,9 @@ SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', (1 row) -\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\COPY customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' +\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' +\copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -- Testing master_apply_delete_command -- Check that we don't support conditions on columns other than partition key. 
SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol diff --git a/src/test/regress/output/multi_outer_join.source b/src/test/regress/output/multi_outer_join.source index b3f45d825..24365317b 100644 --- a/src/test/regress/output/multi_outer_join.source +++ b/src/test/regress/output/multi_outer_join.source @@ -62,10 +62,10 @@ FROM ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning -- Left table is a large table -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -- Right table is a small table -\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' +\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -- Make sure we do not crash if one table has no shards SELECT min(l_custkey), max(l_custkey) @@ -84,7 +84,7 @@ LOG: join order: [ "multi_outer_join_third" ][ broadcast join "multi_outer_join (1 row) -- Third table is a single shard table with all data -\COPY multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' +\copy multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' -- Regular outer join should return results for all rows SELECT min(l_custkey), max(l_custkey) @@ -202,7 +202,7 @@ LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_ (1 row) -- Turn the right table into a large table -\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- Shards do not have 1-1 matching. We should error here. 
SELECT min(l_custkey), max(l_custkey) @@ -224,10 +224,10 @@ SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right'); (1 row) -- reload shards with 1-1 matching -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -\COPY multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' +\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' +\copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- multi_outer_join_third is a single shard table -- Regular left join should work as expected SELECT @@ -754,7 +754,7 @@ LIMIT 20; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries in outer joins are not supported -- Add a shard to the left table that overlaps with multiple shards in the right -\COPY multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -- All outer joins should error out SELECT min(l_custkey), max(l_custkey) diff --git a/src/test/regress/output/multi_stage_data.source b/src/test/regress/output/multi_stage_data.source index ca47fbb88..0cc434c59 100644 --- a/src/test/regress/output/multi_stage_data.source +++ b/src/test/regress/output/multi_stage_data.source @@ -7,11 +7,11 @@ -- policy is left to the default value (round-robin) to test the common install case. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000; -\COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -\COPY orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' -\COPY orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -\COPY customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\COPY nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' -\COPY part FROM '@abs_srcdir@/data/part.data' with delimiter '|' -\COPY supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' +\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|' +\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' diff --git a/src/test/regress/output/multi_stage_large_records.source b/src/test/regress/output/multi_stage_large_records.source index e40e2ee71..918a30401 100644 --- a/src/test/regress/output/multi_stage_large_records.source +++ b/src/test/regress/output/multi_stage_large_records.source @@ -14,7 +14,7 @@ SELECT master_create_distributed_table('large_records_table', 'data_id', 'append (1 row) -\COPY large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' +\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class WHERE pg_class.oid=logicalrelid AND relname='large_records_table' ORDER BY shardid; diff --git a/src/test/regress/output/multi_stage_more_data.source b/src/test/regress/output/multi_stage_more_data.source index b1f51c726..20dd3e89a 100644 --- a/src/test/regress/output/multi_stage_more_data.source +++ b/src/test/regress/output/multi_stage_more_data.source @@ -6,6 +6,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000; -- We stage more data to customer and part tables to test distributed joins. The -- staging causes the planner to consider customer and part tables as large, and -- evaluate plans where some of the underlying tables need to be repartitioned. 
-\COPY customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\COPY customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -\COPY part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' +\copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' +\copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' +\copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' diff --git a/src/test/regress/output/multi_subquery.source b/src/test/regress/output/multi_subquery.source index 96d8544d6..992295706 100644 --- a/src/test/regress/output/multi_subquery.source +++ b/src/test/regress/output/multi_subquery.source @@ -100,10 +100,10 @@ SELECT master_create_empty_shard('orders_subquery') AS new_shard_id UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946 WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "1MB"; -\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\COPY lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -\COPY orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' -\COPY orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -- Check that we error out if shard min/max values are not exactly same. SELECT avg(unit_price) diff --git a/src/test/regress/sql/multi_agg_approximate_distinct.sql b/src/test/regress/sql/multi_agg_approximate_distinct.sql index a80b59967..1a8a7d168 100644 --- a/src/test/regress/sql/multi_agg_approximate_distinct.sql +++ b/src/test/regress/sql/multi_agg_approximate_distinct.sql @@ -65,7 +65,7 @@ CREATE TABLE test_count_distinct_schema.nation_hash( SELECT master_create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, 2); -\COPY test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; +\copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql index 2e827dd19..384814fa3 100644 --- a/src/test/regress/sql/multi_schema_support.sql +++ b/src/test/regress/sql/multi_schema_support.sql @@ -18,7 +18,7 @@ CREATE TABLE public.nation_local( n_comment varchar(152) ); -\COPY public.nation_local FROM STDIN with delimiter '|'; +\copy public.nation_local FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. 
slyly special @@ -85,7 +85,7 @@ CREATE TABLE nation_append_search_path( ); SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); -\COPY nation_append_search_path FROM STDIN with delimiter '|'; +\copy nation_append_search_path FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -146,7 +146,7 @@ SELECT * FROM nation_hash WHERE n_nationkey = 7; -- test UDFs with schemas SET search_path TO public; -\COPY test_schema_support.nation_hash FROM STDIN with delimiter '|'; +\copy test_schema_support.nation_hash FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -337,7 +337,7 @@ CREATE TABLE test_schema_support.nation_hash_collation( SELECT master_create_distributed_table('test_schema_support.nation_hash_collation', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support.nation_hash_collation', 4, 2); -\COPY test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; +\copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -360,7 +360,7 @@ CREATE TABLE nation_hash_collation_search_path( SELECT master_create_distributed_table('nation_hash_collation_search_path', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('nation_hash_collation_search_path', 4, 2); -\COPY nation_hash_collation_search_path FROM STDIN with delimiter '|'; +\copy nation_hash_collation_search_path FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -396,7 +396,7 @@ SELECT master_create_distributed_table('test_schema_support.nation_hash_composit SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_types', 4, 2); -- insert some data to verify composite type queries -\COPY test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; +\copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai|(a,a) 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon|(a,b) 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special |(a,c) @@ -531,7 +531,7 @@ SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_appen -- test with search_path is set SET search_path TO test_schema_support; -\COPY nation_append FROM STDIN with delimiter '|'; +\copy nation_append FROM STDIN with delimiter '|'; 0|ALGERIA|0| haggle. 
carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -578,7 +578,7 @@ CREATE TABLE test_schema_support_join_2.nation_hash ( SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4, 1); -\COPY test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; +\copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -590,7 +590,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4, SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4, 1); -\COPY test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; +\copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special @@ -602,7 +602,7 @@ SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4 SELECT master_create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support_join_2.nation_hash', 4, 1); -\COPY test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; +\copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special From 35e9f51348bc8a28a91051f537062cd4b495eddc Mon Sep 17 00:00:00 2001 From: Jason Petersen Date: Mon, 22 Aug 2016 11:48:41 -0600 Subject: [PATCH 3/5] Replace verb 'stage' with 'load' in schedules "Staging table" will be the only valid use of 'stage' from now on. 
--- src/test/regress/multi_fdw_schedule | 3 ++- src/test/regress/multi_schedule | 18 +++++++++--------- .../regress/multi_task_tracker_extra_schedule | 10 +++++----- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/test/regress/multi_fdw_schedule b/src/test/regress/multi_fdw_schedule index e9c861022..91d27c02c 100644 --- a/src/test/regress/multi_fdw_schedule +++ b/src/test/regress/multi_fdw_schedule @@ -20,6 +20,7 @@ test: multi_tpch_query1 multi_tpch_query3 multi_tpch_query6 multi_tpch_query10 test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19 # ---------- -# multi_fdw_large_shardid stages more shards into lineitem, and must come last +# multi_fdw_large_shardid loads more lineitem data using high shard identifiers, and must +# come last # ---------- test: multi_fdw_large_shardid diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 7ea2e94d6..86330cb70 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -55,7 +55,7 @@ test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19 test: multi_tpch_query7 multi_tpch_query7_nested # ---------- -# Parallel tests to check our join order planning logic. Note that we stage data +# Parallel tests to check our join order planning logic. Note that we load data # below; and therefore these tests should come after the execution tests. # ---------- test: multi_join_order_tpch_small multi_join_order_additional @@ -63,16 +63,16 @@ test: multi_stage_more_data test: multi_join_order_tpch_large # ---------- -# Tests for large-table join planning and execution. -# Be careful when staging new data before these tests, as they -# expect specific shard identifiers in the output. +# Tests for large-table join planning and execution. Be careful when creating +# new shards before these tests, as they expect specific shard identifiers in +# the output. # ---------- test: multi_large_table_join_planning test: multi_large_table_pruning test: multi_large_table_task_assignment # ---------- -# Tests to check our large record staging and shard deletion behavior +# Tests to check our large record loading and shard deletion behavior # ---------- test: multi_stage_large_records test: multi_master_delete_protocol @@ -85,7 +85,7 @@ test: multi_index_statements test: multi_alter_table_statements # ---------- -# multi_create_schema tests creation, staging and querying of a table in a new +# multi_create_schema tests creation, loading, and querying of a table in a new # schema (namespace). # ---------- test: multi_create_schema @@ -97,13 +97,13 @@ test: multi_create_schema test: multi_utility_warnings # --------- -# multi_append_table_to_shard stages shards in a way that forces +# multi_append_table_to_shard loads data to create shards in a way that forces # shard caching. 
 # ---------
 test: multi_append_table_to_shard
 
 # ---------
-# multi_outer_join stages shards to create different mappings for outer joins
+# multi_outer_join loads data to create shards to test outer join mappings
 # ---------
 test: multi_outer_join
 
@@ -141,7 +141,7 @@ test: multi_copy
 test: multi_router_planner
 
 # ----------
-# multi_large_shardid stages more shards into lineitem
+# multi_large_shardid loads more lineitem data using high shard identifiers
 # ----------
 test: multi_large_shardid
 
diff --git a/src/test/regress/multi_task_tracker_extra_schedule b/src/test/regress/multi_task_tracker_extra_schedule
index 7482d4f42..42426dd7e 100644
--- a/src/test/regress/multi_task_tracker_extra_schedule
+++ b/src/test/regress/multi_task_tracker_extra_schedule
@@ -48,7 +48,7 @@ test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19
 test: multi_tpch_query7 multi_tpch_query7_nested
 
 # ----------
-# Parallel tests to check our join order planning logic. Note that we stage data
+# Parallel tests to check our join order planning logic. Note that we load data
 # below; and therefore these tests should come after the execution tests.
 # ----------
 test: multi_join_order_tpch_small multi_join_order_additional
@@ -56,20 +56,20 @@ test: multi_stage_more_data
 test: multi_join_order_tpch_large
 
 # ----------
-# Tests to check our large record staging and shard deletion behavior
+# Tests to check our large record loading and shard deletion behavior
 # ----------
 test: multi_stage_large_records
 test: multi_master_delete_protocol
 test: multi_shard_modify
 
 # ----------
-# multi_create_schema tests creation, staging and querying of a table in a new
+# multi_create_schema tests creation, loading, and querying of a table in a new
 # schema (namespace).
 # ----------
 test: multi_create_schema
 
 # ---------
-# multi_outer_join stages shards to create different mappings for outer joins
+# multi_outer_join loads data to create shards to test outer join mappings
 # ---------
 test: multi_outer_join
 
@@ -99,7 +99,7 @@ test: multi_data_types
 test: multi_copy
 
 # ----------
-# multi_large_shardid stages more shards into lineitem
+# multi_large_shardid loads more lineitem data using high shard identifiers
 # ----------
 test: multi_large_shardid
 
From b391abda3d6ea70a3a884665079813b4bddb777c Mon Sep 17 00:00:00 2001
From: Jason Petersen
Date: Mon, 22 Aug 2016 13:24:18 -0600
Subject: [PATCH 4/5] Replace verb 'stage' with 'load' in test comments

"Staging table" will be the only valid use of 'stage' from now on; we will
say "load" when talking about data ingestion. If creation of shards is its
own step, we'll just say "shard creation".
--- src/test/regress/expected/multi_hash_pruning.out | 2 +- src/test/regress/expected/multi_hash_pruning_0.out | 2 +- src/test/regress/expected/multi_join_order_tpch_large.out | 2 +- src/test/regress/expected/multi_shard_modify.out | 2 +- src/test/regress/input/multi_agg_distinct.source | 2 +- src/test/regress/input/multi_agg_type_conversion.source | 2 +- src/test/regress/input/multi_append_table_to_shard.source | 6 +++--- src/test/regress/input/multi_large_shardid.source | 4 ++-- src/test/regress/input/multi_master_delete_protocol.source | 2 +- src/test/regress/input/multi_stage_data.source | 2 +- src/test/regress/input/multi_stage_large_records.source | 2 +- src/test/regress/input/multi_stage_more_data.source | 4 ++-- src/test/regress/input/multi_subquery.source | 2 +- src/test/regress/output/multi_agg_distinct.source | 2 +- src/test/regress/output/multi_agg_type_conversion.source | 2 +- src/test/regress/output/multi_append_table_to_shard.source | 6 +++--- src/test/regress/output/multi_large_shardid.source | 4 ++-- src/test/regress/output/multi_master_delete_protocol.source | 2 +- src/test/regress/output/multi_stage_data.source | 2 +- src/test/regress/output/multi_stage_large_records.source | 2 +- src/test/regress/output/multi_stage_more_data.source | 4 ++-- src/test/regress/output/multi_subquery.source | 2 +- src/test/regress/sql/multi_hash_pruning.sql | 2 +- src/test/regress/sql/multi_join_order_tpch_large.sql | 2 +- src/test/regress/sql/multi_shard_modify.sql | 2 +- 25 files changed, 33 insertions(+), 33 deletions(-) diff --git a/src/test/regress/expected/multi_hash_pruning.out b/src/test/regress/expected/multi_hash_pruning.out index a6f5a0688..de473e728 100644 --- a/src/test/regress/expected/multi_hash_pruning.out +++ b/src/test/regress/expected/multi_hash_pruning.out @@ -5,7 +5,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000; -- Create a table partitioned on integer column and update partition type to --- hash. Then stage data to this table and update shard min max values with +-- hash. Then load data into this table and update shard min max values with -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026, -- 1134484726, -28094569 and -1011077333. CREATE TABLE orders_hash_partitioned ( diff --git a/src/test/regress/expected/multi_hash_pruning_0.out b/src/test/regress/expected/multi_hash_pruning_0.out index 35b3ea99b..c23e37ea1 100644 --- a/src/test/regress/expected/multi_hash_pruning_0.out +++ b/src/test/regress/expected/multi_hash_pruning_0.out @@ -5,7 +5,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000; -- Create a table partitioned on integer column and update partition type to --- hash. Then stage data to this table and update shard min max values with +-- hash. Then load data into this table and update shard min max values with -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026, -- 1134484726, -28094569 and -1011077333. 
CREATE TABLE orders_hash_partitioned ( diff --git a/src/test/regress/expected/multi_join_order_tpch_large.out b/src/test/regress/expected/multi_join_order_tpch_large.out index 6532d9ea1..327687eac 100644 --- a/src/test/regress/expected/multi_join_order_tpch_large.out +++ b/src/test/regress/expected/multi_join_order_tpch_large.out @@ -9,7 +9,7 @@ SET citus.log_multi_join_order TO TRUE; SET client_min_messages TO LOG; -- Change configuration to treat lineitem, orders, customer, and part tables as -- large. The following queries are basically the same as the ones in tpch_small --- except that more data has been staged to customer and part tables. Therefore, +-- except that more data has been loaded into customer and part tables. Therefore, -- we will apply different distributed join strategies for these queries. SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_shard_modify.out b/src/test/regress/expected/multi_shard_modify.out index fc4866d04..d722c3ad3 100644 --- a/src/test/regress/expected/multi_shard_modify.out +++ b/src/test/regress/expected/multi_shard_modify.out @@ -3,7 +3,7 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000; --- Create a new hash partitioned multi_shard_modify_test table and stage data into it. +-- Create a new hash partitioned multi_shard_modify_test table and load data into it. CREATE TABLE multi_shard_modify_test ( t_key integer not null, t_name varchar(25) not null, diff --git a/src/test/regress/input/multi_agg_distinct.source b/src/test/regress/input/multi_agg_distinct.source index ec6f525ed..a63ecbb05 100644 --- a/src/test/regress/input/multi_agg_distinct.source +++ b/src/test/regress/input/multi_agg_distinct.source @@ -7,7 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000; --- Create a new range partitioned lineitem table and stage data into it +-- Create a new range partitioned lineitem table and load data into it CREATE TABLE lineitem_range ( l_orderkey bigint not null, l_partkey integer not null, diff --git a/src/test/regress/input/multi_agg_type_conversion.source b/src/test/regress/input/multi_agg_type_conversion.source index 8ef9d2b94..71bdc7e15 100644 --- a/src/test/regress/input/multi_agg_type_conversion.source +++ b/src/test/regress/input/multi_agg_type_conversion.source @@ -14,7 +14,7 @@ SELECT sum(l_suppkey) / 2::numeric FROM lineitem; SELECT sum(l_suppkey)::int8 / 2 FROM lineitem; --- Create a new table to test type conversions on different types, and stage +-- Create a new table to test type conversions on different types, and load -- data into this table. Then, apply aggregate functions and divide / multiply -- the results to test type conversions. 
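The type-conversion queries in the hunk above (sum(l_suppkey) / 2::numeric versus sum(l_suppkey)::int8 / 2) turn on one detail worth spelling out: sum() over an integer column returns bigint, so dividing by a plain integer truncates, while casting an operand to numeric keeps the fractional part. A self-contained sketch with made-up values, not taken from the test data:

-- sum(x) below is 7 (bigint); bigint / int performs integer division.
SELECT sum(x) / 2          AS truncated, -- 3
       sum(x) / 2::numeric AS exact      -- 3.5
FROM (VALUES (3), (4)) AS t(x);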
diff --git a/src/test/regress/input/multi_append_table_to_shard.source b/src/test/regress/input/multi_append_table_to_shard.source index fef9e8488..def84b299 100644 --- a/src/test/regress/input/multi_append_table_to_shard.source +++ b/src/test/regress/input/multi_append_table_to_shard.source @@ -111,7 +111,7 @@ SELECT master_create_empty_shard('multi_append_table_to_shard_date'); SELECT * FROM multi_append_table_to_shard_date; --- Stage an empty table and check that we can query the distributed table +-- Create an empty distributed table and check that we can query it CREATE TABLE multi_append_table_to_shard_stage (LIKE multi_append_table_to_shard_date); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM @@ -120,7 +120,7 @@ WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; SELECT * FROM multi_append_table_to_shard_date; --- Stage NULL values and check that we can query the table +-- INSERT NULL values and check that we can query the table INSERT INTO multi_append_table_to_shard_stage VALUES (NULL, NULL); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM @@ -129,7 +129,7 @@ WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; SELECT * FROM multi_append_table_to_shard_date; --- Stage regular values and check that we can query the table +-- INSERT regular values and check that we can query the table INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-01-01', 3); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM diff --git a/src/test/regress/input/multi_large_shardid.source b/src/test/regress/input/multi_large_shardid.source index 53767e90c..2ce50e190 100644 --- a/src/test/regress/input/multi_large_shardid.source +++ b/src/test/regress/input/multi_large_shardid.source @@ -2,7 +2,7 @@ -- MULTI_LARGE_SHARDID -- --- Stage data to distributed tables, and run TPC-H query #1 and #6. This test +-- Load data into distributed tables, and run TPC-H query #1 and #6. This test -- differs from previous tests in that it modifies the *internal* shardId -- generator, forcing the distributed database to use 64-bit shard identifiers. @@ -11,7 +11,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000; --- Stage additional data to start using large shard identifiers. +-- Load additional data to start using large shard identifiers. \copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' diff --git a/src/test/regress/input/multi_master_delete_protocol.source b/src/test/regress/input/multi_master_delete_protocol.source index 4160102e6..378985b63 100644 --- a/src/test/regress/input/multi_master_delete_protocol.source +++ b/src/test/regress/input/multi_master_delete_protocol.source @@ -7,7 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 320000; --- Create a new range partitioned customer_delete_protocol table and stage data into it. +-- Create a new range partitioned customer_delete_protocol table and load data into it. 
CREATE TABLE customer_delete_protocol ( c_custkey integer not null, c_name varchar(25) not null, diff --git a/src/test/regress/input/multi_stage_data.source b/src/test/regress/input/multi_stage_data.source index 850b4edef..6daf16d3e 100644 --- a/src/test/regress/input/multi_stage_data.source +++ b/src/test/regress/input/multi_stage_data.source @@ -1,7 +1,7 @@ -- -- MULTI_STAGE_DATA -- --- Tests for staging data in a distributed cluster. Please note that the number +-- Tests for loading data in a distributed cluster. Please note that the number -- of shards uploaded depends on two config values: citus.shard_replication_factor and -- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement -- policy is left to the default value (round-robin) to test the common install case. diff --git a/src/test/regress/input/multi_stage_large_records.source b/src/test/regress/input/multi_stage_large_records.source index 18c73b55e..d94396ce8 100644 --- a/src/test/regress/input/multi_stage_large_records.source +++ b/src/test/regress/input/multi_stage_large_records.source @@ -1,7 +1,7 @@ -- -- MULTI_STAGE_LARGE_RECORDS -- --- Tests for staging data with large records (i.e. greater than the read buffer +-- Tests for loading data with large records (i.e. greater than the read buffer -- size, which is 32kB) in a distributed cluster. These tests make sure that we -- are creating shards of correct size even when records are large. diff --git a/src/test/regress/input/multi_stage_more_data.source b/src/test/regress/input/multi_stage_more_data.source index 651ce2625..655201ac9 100644 --- a/src/test/regress/input/multi_stage_more_data.source +++ b/src/test/regress/input/multi_stage_more_data.source @@ -7,8 +7,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000; --- We stage more data to customer and part tables to test distributed joins. The --- staging causes the planner to consider customer and part tables as large, and +-- We load more data to customer and part tables to test distributed joins. The +-- loading causes the planner to consider customer and part tables as large, and -- evaluate plans where some of the underlying tables need to be repartitioned. \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' diff --git a/src/test/regress/input/multi_subquery.source b/src/test/regress/input/multi_subquery.source index 5976e2dff..e3f684b34 100644 --- a/src/test/regress/input/multi_subquery.source +++ b/src/test/regress/input/multi_subquery.source @@ -77,7 +77,7 @@ FROM GROUP BY l_orderkey) AS unit_prices; --- Stage data to tables. +-- Load data into tables. 
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id \gset diff --git a/src/test/regress/output/multi_agg_distinct.source b/src/test/regress/output/multi_agg_distinct.source index cb75d8f23..41bc722ee 100644 --- a/src/test/regress/output/multi_agg_distinct.source +++ b/src/test/regress/output/multi_agg_distinct.source @@ -3,7 +3,7 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000; --- Create a new range partitioned lineitem table and stage data into it +-- Create a new range partitioned lineitem table and load data into it CREATE TABLE lineitem_range ( l_orderkey bigint not null, l_partkey integer not null, diff --git a/src/test/regress/output/multi_agg_type_conversion.source b/src/test/regress/output/multi_agg_type_conversion.source index 1ba41083b..82a30be1a 100644 --- a/src/test/regress/output/multi_agg_type_conversion.source +++ b/src/test/regress/output/multi_agg_type_conversion.source @@ -28,7 +28,7 @@ SELECT sum(l_suppkey)::int8 / 2 FROM lineitem; 30308988 (1 row) --- Create a new table to test type conversions on different types, and stage +-- Create a new table to test type conversions on different types, and load -- data into this table. Then, apply aggregate functions and divide / multiply -- the results to test type conversions. CREATE TABLE aggregate_type ( diff --git a/src/test/regress/output/multi_append_table_to_shard.source b/src/test/regress/output/multi_append_table_to_shard.source index c04a9563d..8bf9f69da 100644 --- a/src/test/regress/output/multi_append_table_to_shard.source +++ b/src/test/regress/output/multi_append_table_to_shard.source @@ -175,7 +175,7 @@ SELECT * FROM multi_append_table_to_shard_date; ------------+------- (0 rows) --- Stage an empty table and check that we can query the distributed table +-- Create an empty distributed table and check that we can query it CREATE TABLE multi_append_table_to_shard_stage (LIKE multi_append_table_to_shard_date); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM @@ -191,7 +191,7 @@ SELECT * FROM multi_append_table_to_shard_date; ------------+------- (0 rows) --- Stage NULL values and check that we can query the table +-- INSERT NULL values and check that we can query the table INSERT INTO multi_append_table_to_shard_stage VALUES (NULL, NULL); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM @@ -208,7 +208,7 @@ SELECT * FROM multi_append_table_to_shard_date; | (1 row) --- Stage regular values and check that we can query the table +-- INSERT regular values and check that we can query the table INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-01-01', 3); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM diff --git a/src/test/regress/output/multi_large_shardid.source b/src/test/regress/output/multi_large_shardid.source index ac291ca78..d223875b0 100644 --- a/src/test/regress/output/multi_large_shardid.source +++ b/src/test/regress/output/multi_large_shardid.source @@ -1,12 +1,12 @@ -- -- MULTI_LARGE_SHARDID -- --- Stage data to distributed tables, and run TPC-H query #1 and #6. This test +-- Load data into distributed tables, and run TPC-H query #1 and #6. This test -- differs from previous tests in that it modifies the *internal* shardId -- generator, forcing the distributed database to use 64-bit shard identifiers. 
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000;
--- Stage additional data to start using large shard identifiers.
+-- Load additional data to start using large shard identifiers.
 \copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
 \copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
 -- Query #1 from the TPC-H decision support benchmark.
diff --git a/src/test/regress/output/multi_master_delete_protocol.source b/src/test/regress/output/multi_master_delete_protocol.source
index a1499b273..0e35f9b4d 100644
--- a/src/test/regress/output/multi_master_delete_protocol.source
+++ b/src/test/regress/output/multi_master_delete_protocol.source
@@ -3,7 +3,7 @@
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 320000;
--- Create a new range partitioned customer_delete_protocol table and stage data into it.
+-- Create a new range partitioned customer_delete_protocol table and load data into it.
 CREATE TABLE customer_delete_protocol (
 	c_custkey integer not null,
 	c_name varchar(25) not null,
diff --git a/src/test/regress/output/multi_stage_data.source b/src/test/regress/output/multi_stage_data.source
index 0cc434c59..b27ff1e3b 100644
--- a/src/test/regress/output/multi_stage_data.source
+++ b/src/test/regress/output/multi_stage_data.source
@@ -1,7 +1,7 @@
 --
 -- MULTI_STAGE_DATA
 --
--- Tests for staging data in a distributed cluster. Please note that the number
+-- Tests for loading data in a distributed cluster. Please note that the number
 -- of shards uploaded depends on two config values: citus.shard_replication_factor and
 -- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement
 -- policy is left to the default value (round-robin) to test the common install case.
diff --git a/src/test/regress/output/multi_stage_large_records.source b/src/test/regress/output/multi_stage_large_records.source
index 918a30401..70a53cf26 100644
--- a/src/test/regress/output/multi_stage_large_records.source
+++ b/src/test/regress/output/multi_stage_large_records.source
@@ -1,7 +1,7 @@
 --
 -- MULTI_STAGE_LARGE_RECORDS
 --
--- Tests for staging data with large records (i.e. greater than the read buffer
+-- Tests for loading data with large records (i.e. greater than the read buffer
 -- size, which is 32kB) in a distributed cluster. These tests make sure that we
 -- are creating shards of correct size even when records are large.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 300000;
diff --git a/src/test/regress/output/multi_stage_more_data.source b/src/test/regress/output/multi_stage_more_data.source
index 20dd3e89a..dec7d9c26 100644
--- a/src/test/regress/output/multi_stage_more_data.source
+++ b/src/test/regress/output/multi_stage_more_data.source
@@ -3,8 +3,8 @@
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
--- We stage more data to customer and part tables to test distributed joins. The
--- staging causes the planner to consider customer and part tables as large, and
+-- We load more data to customer and part tables to test distributed joins. The
+-- loading causes the planner to consider customer and part tables as large, and
 -- evaluate plans where some of the underlying tables need to be repartitioned.
 \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
 \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
diff --git a/src/test/regress/output/multi_subquery.source b/src/test/regress/output/multi_subquery.source
index 992295706..e0340f3f6 100644
--- a/src/test/regress/output/multi_subquery.source
+++ b/src/test/regress/output/multi_subquery.source
@@ -82,7 +82,7 @@ FROM
 (1 row)
 
--- Stage data to tables.
+-- Load data into tables.
 SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id \gset
 UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
diff --git a/src/test/regress/sql/multi_hash_pruning.sql b/src/test/regress/sql/multi_hash_pruning.sql
index d6210d8f8..0088264a4 100644
--- a/src/test/regress/sql/multi_hash_pruning.sql
+++ b/src/test/regress/sql/multi_hash_pruning.sql
@@ -10,7 +10,7 @@
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
 
 -- Create a table partitioned on integer column and update partition type to
--- hash. Then stage data to this table and update shard min max values with
+-- hash. Then load data into this table and update shard min max values with
 -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
 -- 1134484726, -28094569 and -1011077333.
 
diff --git a/src/test/regress/sql/multi_join_order_tpch_large.sql b/src/test/regress/sql/multi_join_order_tpch_large.sql
index 89ab65411..542b33385 100644
--- a/src/test/regress/sql/multi_join_order_tpch_large.sql
+++ b/src/test/regress/sql/multi_join_order_tpch_large.sql
@@ -15,7 +15,7 @@
 SET client_min_messages TO LOG;
 
 -- Change configuration to treat lineitem, orders, customer, and part tables as
 -- large. The following queries are basically the same as the ones in tpch_small
--- except that more data has been staged to customer and part tables. Therefore,
+-- except that more data has been loaded into customer and part tables. Therefore,
 -- we will apply different distributed join strategies for these queries.
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_shard_modify.sql b/src/test/regress/sql/multi_shard_modify.sql
index f8bd8f88c..9b1fabf26 100644
--- a/src/test/regress/sql/multi_shard_modify.sql
+++ b/src/test/regress/sql/multi_shard_modify.sql
@@ -7,7 +7,7 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000;
 
--- Create a new hash partitioned multi_shard_modify_test table and stage data into it.
+-- Create a new hash partitioned multi_shard_modify_test table and load data into it.
 CREATE TABLE multi_shard_modify_test (
 	t_key integer not null,
 	t_name varchar(25) not null,

From e54d3f6d32b74a343eb9f8873df39bba8bbcbec1 Mon Sep 17 00:00:00 2001
From: Jason Petersen
Date: Mon, 22 Aug 2016 13:32:53 -0600
Subject: [PATCH 5/5] Rename test files with 'stage' in name

Ignored FDW files as those tests are being removed entirely, I believe.
---
 src/test/regress/expected/.gitignore                        | 6 +++---
 .../{multi_stage_data.source => multi_load_data.source}     | 0
 ...large_records.source => multi_load_large_records.source} | 0
 ...i_stage_more_data.source => multi_load_more_data.source} | 0
 src/test/regress/multi_binary_schedule                      | 2 +-
 src/test/regress/multi_schedule                             | 6 +++---
 src/test/regress/multi_task_tracker_extra_schedule          | 6 +++---
 .../{multi_stage_data.source => multi_load_data.source}     | 0
 ...large_records.source => multi_load_large_records.source} | 0
 ...i_stage_more_data.source => multi_load_more_data.source} | 0
 src/test/regress/sql/.gitignore                             | 6 +++---
 11 files changed, 13 insertions(+), 13 deletions(-)
 rename src/test/regress/input/{multi_stage_data.source => multi_load_data.source} (100%)
 rename src/test/regress/input/{multi_stage_large_records.source => multi_load_large_records.source} (100%)
 rename src/test/regress/input/{multi_stage_more_data.source => multi_load_more_data.source} (100%)
 rename src/test/regress/output/{multi_stage_data.source => multi_load_data.source} (100%)
 rename src/test/regress/output/{multi_stage_large_records.source => multi_load_large_records.source} (100%)
 rename src/test/regress/output/{multi_stage_more_data.source => multi_load_more_data.source} (100%)

diff --git a/src/test/regress/expected/.gitignore b/src/test/regress/expected/.gitignore
index 91232bac2..7f1b98f09 100644
--- a/src/test/regress/expected/.gitignore
+++ b/src/test/regress/expected/.gitignore
@@ -10,9 +10,9 @@
 /multi_large_shardid.out
 /multi_master_delete_protocol.out
 /multi_outer_join.out
-/multi_stage_data.out
-/multi_stage_large_records.out
-/multi_stage_more_data.out
+/multi_load_data.out
+/multi_load_large_records.out
+/multi_load_more_data.out
 /multi_subquery.out
 /multi_subquery_0.out
 /worker_copy.out
diff --git a/src/test/regress/input/multi_stage_data.source b/src/test/regress/input/multi_load_data.source
similarity index 100%
rename from src/test/regress/input/multi_stage_data.source
rename to src/test/regress/input/multi_load_data.source
diff --git a/src/test/regress/input/multi_stage_large_records.source b/src/test/regress/input/multi_load_large_records.source
similarity index 100%
rename from src/test/regress/input/multi_stage_large_records.source
rename to src/test/regress/input/multi_load_large_records.source
diff --git a/src/test/regress/input/multi_stage_more_data.source b/src/test/regress/input/multi_load_more_data.source
similarity index 100%
rename from src/test/regress/input/multi_stage_more_data.source
rename to src/test/regress/input/multi_load_more_data.source
diff --git a/src/test/regress/multi_binary_schedule b/src/test/regress/multi_binary_schedule
index 42bca0e45..d5330f064 100644
--- a/src/test/regress/multi_binary_schedule
+++ b/src/test/regress/multi_binary_schedule
@@ -18,7 +18,7 @@ test: multi_table_ddl
 # uploading data to it.
 # ----------
 test: multi_create_table
-test: multi_stage_data
+test: multi_load_data
 
 test: multi_basic_queries multi_complex_expressions multi_verify_no_subquery
 test: multi_single_relation_subquery
diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule
index 86330cb70..c97da7190 100644
--- a/src/test/regress/multi_schedule
+++ b/src/test/regress/multi_schedule
@@ -24,7 +24,7 @@ test: multi_table_ddl
 # ----------
 test: multi_create_table
 test: multi_master_protocol
-test: multi_stage_data
+test: multi_load_data
 
 # ----------
 # Miscellaneous tests to check our query planning behavior
@@ -59,7 +59,7 @@ test: multi_tpch_query7 multi_tpch_query7_nested
 # below; and therefore these tests should come after the execution tests.
 # ----------
 test: multi_join_order_tpch_small multi_join_order_additional
-test: multi_stage_more_data
+test: multi_load_more_data
 test: multi_join_order_tpch_large
 
 # ----------
@@ -74,7 +74,7 @@ test: multi_large_table_task_assignment
 # ----------
 # Tests to check our large record loading and shard deletion behavior
 # ----------
-test: multi_stage_large_records
+test: multi_load_large_records
 test: multi_master_delete_protocol
 test: multi_shard_modify
diff --git a/src/test/regress/multi_task_tracker_extra_schedule b/src/test/regress/multi_task_tracker_extra_schedule
index 42426dd7e..a3252b728 100644
--- a/src/test/regress/multi_task_tracker_extra_schedule
+++ b/src/test/regress/multi_task_tracker_extra_schedule
@@ -22,7 +22,7 @@ test: multi_table_ddl
 # ----------
 test: multi_create_table
 test: multi_master_protocol
-test: multi_stage_data
+test: multi_load_data
 
 # ----------
 # Miscellaneous tests to check our query planning behavior
@@ -52,13 +52,13 @@ test: multi_tpch_query7 multi_tpch_query7_nested
 # below; and therefore these tests should come after the execution tests.
 # ----------
 test: multi_join_order_tpch_small multi_join_order_additional
-test: multi_stage_more_data
+test: multi_load_more_data
 test: multi_join_order_tpch_large
 
 # ----------
 # Tests to check our large record loading and shard deletion behavior
 # ----------
-test: multi_stage_large_records
+test: multi_load_large_records
 test: multi_master_delete_protocol
 test: multi_shard_modify
diff --git a/src/test/regress/output/multi_stage_data.source b/src/test/regress/output/multi_load_data.source
similarity index 100%
rename from src/test/regress/output/multi_stage_data.source
rename to src/test/regress/output/multi_load_data.source
diff --git a/src/test/regress/output/multi_stage_large_records.source b/src/test/regress/output/multi_load_large_records.source
similarity index 100%
rename from src/test/regress/output/multi_stage_large_records.source
rename to src/test/regress/output/multi_load_large_records.source
diff --git a/src/test/regress/output/multi_stage_more_data.source b/src/test/regress/output/multi_load_more_data.source
similarity index 100%
rename from src/test/regress/output/multi_stage_more_data.source
rename to src/test/regress/output/multi_load_more_data.source
diff --git a/src/test/regress/sql/.gitignore b/src/test/regress/sql/.gitignore
index 77dd85ca3..8782c784d 100644
--- a/src/test/regress/sql/.gitignore
+++ b/src/test/regress/sql/.gitignore
@@ -9,9 +9,9 @@
 /multi_large_shardid.sql
 /multi_master_delete_protocol.sql
 /multi_outer_join.sql
-/multi_stage_data.sql
-/multi_stage_large_records.sql
-/multi_stage_more_data.sql
+/multi_load_data.sql
+/multi_load_large_records.sql
+/multi_load_more_data.sql
 /multi_subquery.sql
 /worker_copy.sql
 /multi_complex_count_distinct.sql