mirror of https://github.com/citusdata/citus.git
Merge pull request #575 from citusdata/tests_with_explicit_shardid_jobid
Set Explicit ShardId/JobId In Regression Tests
commit 5e084bfc62
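The change itself is mechanical: each regression test's expected output gains a pair of ALTER SEQUENCE ... RESTART statements that pin the shardId/jobId sequences to a per-file base value, so the identifiers embedded in expected output (shard names, pg_merge_job_* relations, intermediate_column_* targets) no longer depend on which tests ran earlier. A minimal sketch of the pattern, using the base value the diff assigns to the array_agg test below:

-- Pin identifier generation at the top of a test file; every shardId
-- and jobId the test creates is then deterministic.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 520000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 520000;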
@@ -1,6 +1,8 @@
 --
 -- MULTI_ARRAY_AGG
 --
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 520000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 520000;
 -- Check multi_cat_agg() aggregate which is used to implement array_agg()
 SELECT array_cat_agg(i) FROM (VALUES (ARRAY[1,2]), (NULL), (ARRAY[3,4])) AS t(i);
  array_cat_agg
@@ -96,10 +98,10 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_agg(l_orderkey) FROM li
 GROUP BY l_quantity ORDER BY l_quantity;
  l_quantity | count | avg | array_agg
 ------------+-------+-----------------------+--------------------------------------------------------------------------------------------------
- 1.00 | 17 | 1477.1258823529411765 | {8997,9026,9158,9184,9220,9222,9348,9383,9476,5543,5633,5634,5698,5766,5856,5857,5986}
- 2.00 | 19 | 3078.4242105263157895 | {9030,9058,9123,9124,9188,9344,9441,9476,5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923}
- 3.00 | 14 | 4714.0392857142857143 | {9124,9157,9184,9223,9254,9349,9414,9475,9477,5509,5543,5605,5606,5827}
- 4.00 | 19 | 5929.7136842105263158 | {9091,9120,9281,9347,9382,9440,9473,5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985}
+ 1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476}
+ 2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476}
+ 3.00 | 14 | 4714.0392857142857143 | {5509,5543,5605,5606,5827,9124,9157,9184,9223,9254,9349,9414,9475,9477}
+ 4.00 | 19 | 5929.7136842105263158 | {5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985,9091,9120,9281,9347,9382,9440,9473}
 (4 rows)
 
 SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month
@@ -107,10 +109,10 @@ SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month
 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
  l_quantity | my_month
 ------------+------------------------------------------------
- 1.00 | {7,7,4,7,4,2,6,3,5,9,5,7,5,9,11,11,4}
- 2.00 | {7,6,6,10,1,12,6,5,11,10,8,5,5,12,3,11,7,11,5}
- 3.00 | {10,6,7,8,5,8,9,11,3,4,9,8,11,7}
- 4.00 | {11,6,2,8,2,6,10,1,5,6,11,12,10,9,6,1,2,5,1}
+ 1.00 | {9,5,7,5,9,11,11,4,7,7,4,7,4,2,6,3,5}
+ 2.00 | {11,10,8,5,5,12,3,11,7,11,5,7,6,6,10,1,12,6,5}
+ 3.00 | {4,9,8,11,7,10,6,7,8,5,8,9,11,3}
+ 4.00 | {1,5,6,11,12,10,9,6,1,2,5,1,11,6,2,8,2,6,10}
 (4 rows)
 
 SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity < 5
@@ -118,10 +120,10 @@ SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity
 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
  l_quantity | array_agg
 ------------+---------------------------------------------
- 1.00 | {18317,18445,11269,11397,11713,11715,11973}
- 2.00 | {18061,18247,18953,11847}
+ 1.00 | {11269,11397,11713,11715,11973,18317,18445}
+ 2.00 | {11847,18061,18247,18953}
 3.00 | {18249,18315,18699,18951,18955}
- 4.00 | {18241,18765,11653,11659}
+ 4.00 | {11653,11659,18241,18765}
 (4 rows)
 
 -- Check that we can execute array_agg() with an expression containing NULL values
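The first hunk above exercises array_cat_agg(), which Citus uses to concatenate per-worker array_agg() fragments on the master. As an illustration only (the shipped function is defined by the Citus extension, not like this), an equivalent aggregate can be sketched in plain PostgreSQL from array_cat:

-- Hypothetical stand-in for array_cat_agg(); array_cat returns its
-- non-NULL argument when the other is NULL, so NULL rows are skipped.
CREATE AGGREGATE array_cat_agg_demo(anyarray) (
    SFUNC = array_cat,
    STYPE = anyarray
);
SELECT array_cat_agg_demo(i)
    FROM (VALUES (ARRAY[1,2]), (NULL), (ARRAY[3,4])) AS t(i);
-- returns {1,2,3,4}, matching the expected output above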
@@ -4,6 +4,8 @@
 -- This test checks that the group-by columns don't need to be above an average
 -- expression, and can be anywhere in the projection order. This is in response
 -- to a bug we had due to the average expression introducing new columns.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 450000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 450000;
 SELECT
 sum(l_quantity) as sum_qty,
 sum(l_extendedprice) as sum_base_price,
@@ -1,6 +1,8 @@
 --
 -- MULTI_BASIC_QUERIES
 --
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 440000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 440000;
 -- Execute simple sum, average, and count queries on data recently uploaded to
 -- our partitioned table.
 SELECT count(*) FROM lineitem;
@@ -1,6 +1,8 @@
 --
 -- MULTI_BINARY_MASTER_COPY
 --
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 430000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 430000;
 -- Try binary master copy for different executors
 SET citus.binary_master_copy_format TO 'on';
 SET citus.task_executor_type TO 'task-tracker';
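For context, this test sweeps the binary-transfer setting under each task executor; the session settings are taken from the hunk above, and the repeat under the 'real-time' executor is assumed from the test's own comment:

SET citus.binary_master_copy_format TO 'on';
SET citus.task_executor_type TO 'task-tracker';
-- ...run the queries, then repeat with the 'real-time' executor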
@@ -1,6 +1,8 @@
 --
 -- MULTI_COMPLEX_EXPRESSIONS
 --
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 420000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 420000;
 -- Check that we can correctly handle complex expressions and aggregates.
 SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
  ?column?
@@ -1,3 +1,5 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 410000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 410000;
 -- ===================================================================
 -- create test functions
 -- ===================================================================
@@ -1,6 +1,8 @@
 --
 -- MULTI_COUNT_TYPE_CONVERSION
 --
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 400000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 400000;
 -- Verify that we can sort count(*) results correctly. We perform this check as
 -- our count() operations execute in two steps: worker nodes report their
 -- count() results, and the master node sums these counts up. During this sum(),
@@ -1,3 +1,5 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 390000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 390000;
 -- ===================================================================
 -- get ready for the foreign data wrapper tests
 -- ===================================================================
@@ -1,3 +1,5 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 380000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 380000;
 -- ===================================================================
 -- test INSERT proxy creation functionality
 -- ===================================================================
@@ -1,3 +1,5 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 370000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 370000;
 -- ===================================================================
 -- create test functions and types needed for tests
 -- ===================================================================
@@ -1,6 +1,8 @@
 --
 -- MULTI_CREATE_TABLE
 --
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 360000;
 -- Create new table definitions for use in testing in distributed planning and
 -- execution functionality. Also create indexes to boost performance.
 CREATE TABLE lineitem (
@@ -2,6 +2,8 @@
 -- test composite type, varchar and enum types
 -- create, distribute, INSERT, SELECT and UPDATE
 -- ===================================================================
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 530000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 530000;
 -- create a custom type...
 CREATE TYPE test_composite_type AS (
 i integer,
@@ -1,6 +1,8 @@
 -- ===================================================================
 -- create test functions
 -- ===================================================================
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 540000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 540000;
 CREATE FUNCTION load_shard_id_array(regclass)
 RETURNS bigint[]
 AS 'citus'
@@ -75,16 +77,16 @@ SELECT master_create_worker_shards('events_hash', 4, 2);
 (1 row)
 
 -- set shardstate of one replication from each shard to 0 (invalid value)
-UPDATE pg_dist_shard_placement SET shardstate = 0 WHERE nodeport = 57638 AND shardid BETWEEN 103025 AND 103028;
+UPDATE pg_dist_shard_placement SET shardstate = 0 WHERE nodeport = 57638 AND shardid BETWEEN 540000 AND 540003;
 -- should see above shard identifiers
 SELECT load_shard_id_array('events_hash');
  load_shard_id_array
 -------------------------------
- {103025,103026,103027,103028}
+ {540000,540001,540002,540003}
 (1 row)
 
 -- should see array with first shard range
-SELECT load_shard_interval_array(103025, 0);
+SELECT load_shard_interval_array(540000, 0);
  load_shard_interval_array
 ---------------------------
  {-2147483648,-1073741825}
@@ -106,42 +108,41 @@ SELECT master_create_distributed_table('events_range', 'name', 'range');
 SELECT master_create_empty_shard('events_range');
  master_create_empty_shard
 ---------------------------
- 103029
+ 540004
 (1 row)
 
 UPDATE pg_dist_shard SET
 shardminvalue = 'Aardvark',
 shardmaxvalue = 'Zebra'
-WHERE shardid = 103029;
-SELECT load_shard_interval_array(103029, ''::text);
+WHERE shardid = 540004;
+SELECT load_shard_interval_array(540004, ''::text);
  load_shard_interval_array
 ---------------------------
  {Aardvark,Zebra}
 (1 row)
 
 -- should see error for non-existent shard
-SELECT load_shard_interval_array(103030, 0);
-ERROR: could not find valid entry for shard 103030
+SELECT load_shard_interval_array(540005, 0);
+ERROR: could not find valid entry for shard 540005
 -- should see two placements
-SELECT load_shard_placement_array(103026, false);
+SELECT load_shard_placement_array(540001, false);
  load_shard_placement_array
 -----------------------------------
  {localhost:57637,localhost:57638}
 (1 row)
 
 -- only one of which is finalized
-SELECT load_shard_placement_array(103026, true);
+SELECT load_shard_placement_array(540001, true);
  load_shard_placement_array
 ----------------------------
  {localhost:57637}
 (1 row)
 
 -- should see error for non-existent shard
-SELECT load_shard_placement_array(103031, false);
-WARNING: could not find any shard placements for shardId 103031
- load_shard_placement_array
-----------------------------
- {}
+SELECT load_shard_placement_array(540001, false);
+ load_shard_placement_array
+-----------------------------------
+ {localhost:57637,localhost:57638}
 (1 row)
 
 -- should see column id of 'name'
@@ -192,7 +193,7 @@ SELECT column_name_to_column_id('events_hash', 'non_existent');
 ERROR: column "non_existent" of relation "events_hash" does not exist
 -- drop shard rows (must drop placements first)
 DELETE FROM pg_dist_shard_placement
-WHERE shardid BETWEEN 103025 AND 103029;
+WHERE shardid BETWEEN 540000 AND 540004;
 DELETE FROM pg_dist_shard
 WHERE logicalrelid = 'events_hash'::regclass;
 DELETE FROM pg_dist_shard
@@ -275,9 +276,9 @@ WHERE shardid = :new_shard_id AND nodename = 'localhost' and nodeport = 5432;
 
 -- deleting or updating a non-existent row should fail
 SELECT delete_shard_placement_row(:new_shard_id, 'wrong_localhost', 5432);
-ERROR: could not find valid entry for shard placement 103030 on node "wrong_localhost:5432"
+ERROR: could not find valid entry for shard placement 540005 on node "wrong_localhost:5432"
 SELECT update_shard_placement_row_state(:new_shard_id, 'localhost', 5432, 3);
-ERROR: could not find valid entry for shard placement 103030 on node "localhost:5432"
+ERROR: could not find valid entry for shard placement 540005 on node "localhost:5432"
 -- now we'll even test our lock methods...
 -- use transaction to bound how long we hold the lock
 BEGIN;
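These metadata hunks show why the renumbering works: once the sequence is pinned, the shard IDs handed out by the shard-creating functions are predictable, so expected output can name them literally. Reconstructing from the values in the hunks above:

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 540000;
SELECT master_create_worker_shards('events_hash', 4, 2);
-- the four hash shards receive IDs 540000..540003
SELECT master_create_empty_shard('events_range');
-- the next shard is therefore 540004, as the expected output asserts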
@@ -2,6 +2,8 @@
 -- MULTI_DROP_EXTENSION
 --
 -- Tests around dropping and recreating the extension
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 550000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 550000;
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
 SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
  master_create_distributed_table
@@ -1,5 +1,7 @@
 -- Tests that check that our query functionality behaves as expected when the
 -- table schema is modified via ALTER statements.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 620000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 620000;
 SELECT count(*) FROM customer;
  count
 -------
@@ -1,6 +1,8 @@
 --
 -- MULTI_EXPLAIN
 --
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 570000;
 \a\t
 SET citus.task_executor_type TO 'real-time';
 SET citus.explain_distributed_queries TO on;
@@ -30,7 +32,7 @@ $BODY$ LANGUAGE plpgsql;
 EXPLAIN (COSTS FALSE, FORMAT TEXT)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
-Distributed Query into pg_merge_job_0040
+Distributed Query into pg_merge_job_570000
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
@@ -38,13 +40,13 @@ Distributed Query into pg_merge_job_0040
 Node: host=localhost port=57637 dbname=regression
 -> HashAggregate
 Group Key: l_quantity
--> Seq Scan on lineitem_102010 lineitem
+-> Seq Scan on lineitem_290000 lineitem
 Master Query
 -> Sort
-Sort Key: (sum(((sum(intermediate_column_40_1))::bigint)))::bigint, intermediate_column_40_0
+Sort Key: (sum(((sum(intermediate_column_570000_1))::bigint)))::bigint, intermediate_column_570000_0
 -> HashAggregate
-Group Key: intermediate_column_40_0
--> Seq Scan on pg_merge_job_0040
+Group Key: intermediate_column_570000_0
+-> Seq Scan on pg_merge_job_570000
 -- Test JSON format
 EXPLAIN (COSTS FALSE, FORMAT JSON)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
@@ -69,7 +71,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
 {
 "Node Type": "Seq Scan",
 "Parent Relationship": "Outer",
-"Relation Name": "lineitem_102010",
+"Relation Name": "lineitem_290000",
 "Alias": "lineitem"
 }
 ]
@@ -85,19 +87,19 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
 {
 "Plan": {
 "Node Type": "Sort",
-"Sort Key": ["(sum(((sum(intermediate_column_41_1))::bigint)))::bigint", "intermediate_column_41_0"],
+"Sort Key": ["(sum(((sum(intermediate_column_570001_1))::bigint)))::bigint", "intermediate_column_570001_0"],
 "Plans": [
 {
 "Node Type": "Aggregate",
 "Strategy": "Hashed",
 "Parent Relationship": "Outer",
-"Group Key": ["intermediate_column_41_0"],
+"Group Key": ["intermediate_column_570001_0"],
 "Plans": [
 {
 "Node Type": "Seq Scan",
 "Parent Relationship": "Outer",
-"Relation Name": "pg_merge_job_0041",
-"Alias": "pg_merge_job_0041"
+"Relation Name": "pg_merge_job_570001",
+"Alias": "pg_merge_job_570001"
 }
 ]
 }
@@ -138,7 +140,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
 <Plan>
 <Node-Type>Seq Scan</Node-Type>
 <Parent-Relationship>Outer</Parent-Relationship>
-<Relation-Name>lineitem_102010</Relation-Name>
+<Relation-Name>lineitem_290000</Relation-Name>
 <Alias>lineitem</Alias>
 </Plan>
 </Plans>
@@ -154,8 +156,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
 <Plan>
 <Node-Type>Sort</Node-Type>
 <Sort-Key>
-<Item>(sum(((sum(intermediate_column_43_1))::bigint)))::bigint</Item>
-<Item>intermediate_column_43_0</Item>
+<Item>(sum(((sum(intermediate_column_570003_1))::bigint)))::bigint</Item>
+<Item>intermediate_column_570003_0</Item>
 </Sort-Key>
 <Plans>
 <Plan>
@@ -163,14 +165,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
 <Strategy>Hashed</Strategy>
 <Parent-Relationship>Outer</Parent-Relationship>
 <Group-Key>
-<Item>intermediate_column_43_0</Item>
+<Item>intermediate_column_570003_0</Item>
 </Group-Key>
 <Plans>
 <Plan>
 <Node-Type>Seq Scan</Node-Type>
 <Parent-Relationship>Outer</Parent-Relationship>
-<Relation-Name>pg_merge_job_0043</Relation-Name>
-<Alias>pg_merge_job_0043</Alias>
+<Relation-Name>pg_merge_job_570003</Relation-Name>
+<Alias>pg_merge_job_570003</Alias>
 </Plan>
 </Plans>
 </Plan>
@@ -204,31 +206,31 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
 Plans:
 - Node Type: "Seq Scan"
 Parent Relationship: "Outer"
-Relation Name: "lineitem_102010"
+Relation Name: "lineitem_290000"
 Alias: "lineitem"
 
 Master Query:
 - Plan:
 Node Type: "Sort"
 Sort Key:
-- "(sum(((sum(intermediate_column_45_1))::bigint)))::bigint"
-- "intermediate_column_45_0"
+- "(sum(((sum(intermediate_column_570005_1))::bigint)))::bigint"
+- "intermediate_column_570005_0"
 Plans:
 - Node Type: "Aggregate"
 Strategy: "Hashed"
 Parent Relationship: "Outer"
 Group Key:
-- "intermediate_column_45_0"
+- "intermediate_column_570005_0"
 Plans:
 - Node Type: "Seq Scan"
 Parent Relationship: "Outer"
-Relation Name: "pg_merge_job_0045"
-Alias: "pg_merge_job_0045"
+Relation Name: "pg_merge_job_570005"
+Alias: "pg_merge_job_570005"
 -- Test Text format
 EXPLAIN (COSTS FALSE, FORMAT TEXT)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
-Distributed Query into pg_merge_job_0046
+Distributed Query into pg_merge_job_570006
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
@@ -236,17 +238,17 @@ Distributed Query into pg_merge_job_0046
 Node: host=localhost port=57637 dbname=regression
 -> HashAggregate
 Group Key: l_quantity
--> Seq Scan on lineitem_102010 lineitem
+-> Seq Scan on lineitem_290000 lineitem
 Master Query
 -> Sort
-Sort Key: (sum(((sum(intermediate_column_46_1))::bigint)))::bigint, intermediate_column_46_0
+Sort Key: (sum(((sum(intermediate_column_570006_1))::bigint)))::bigint, intermediate_column_570006_0
 -> HashAggregate
-Group Key: intermediate_column_46_0
--> Seq Scan on pg_merge_job_0046
+Group Key: intermediate_column_570006_0
+-> Seq Scan on pg_merge_job_570006
 -- Test verbose
 EXPLAIN (COSTS FALSE, VERBOSE TRUE)
 SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
-Distributed Query into pg_merge_job_0047
+Distributed Query into pg_merge_job_570007
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
@@ -254,19 +256,19 @@ Distributed Query into pg_merge_job_0047
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
--> Seq Scan on public.lineitem_102010 lineitem
+-> Seq Scan on public.lineitem_290000 lineitem
 Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
 Master Query
 -> Aggregate
-Output: (sum(intermediate_column_47_0) / (sum(intermediate_column_47_1) / sum(intermediate_column_47_2)))
--> Seq Scan on pg_temp_2.pg_merge_job_0047
-Output: intermediate_column_47_0, intermediate_column_47_1, intermediate_column_47_2
+Output: (sum(intermediate_column_570007_0) / (sum(intermediate_column_570007_1) / sum(intermediate_column_570007_2)))
+-> Seq Scan on pg_temp_2.pg_merge_job_570007
+Output: intermediate_column_570007_0, intermediate_column_570007_1, intermediate_column_570007_2
 -- Test join
 EXPLAIN (COSTS FALSE)
 SELECT * FROM lineitem
 JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5
 ORDER BY l_quantity DESC LIMIT 10;
-Distributed Query into pg_merge_job_0048
+Distributed Query into pg_merge_job_570008
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
@@ -277,15 +279,15 @@ Distributed Query into pg_merge_job_0048
 Sort Key: lineitem.l_quantity DESC
 -> Hash Join
 Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Seq Scan on lineitem_102010 lineitem
+-> Seq Scan on lineitem_290000 lineitem
 Filter: (l_quantity < '5'::numeric)
 -> Hash
--> Seq Scan on orders_102015 orders
+-> Seq Scan on orders_290006 orders
 Master Query
 -> Limit
 -> Sort
-Sort Key: intermediate_column_48_4 DESC
--> Seq Scan on pg_merge_job_0048
+Sort Key: intermediate_column_570008_4 DESC
+-> Seq Scan on pg_merge_job_570008
 -- Test insert
 EXPLAIN (COSTS FALSE)
 INSERT INTO lineitem VALUES(1,0);
@@ -294,8 +296,8 @@ Distributed Query
 Task Count: 1
 Tasks Shown: All
 -> Task
-Node: host=localhost port=57638 dbname=regression
--> Insert on lineitem_102009
+Node: host=localhost port=57637 dbname=regression
+-> Insert on lineitem_290000
 -> Result
 -- Test update
 EXPLAIN (COSTS FALSE)
@@ -307,12 +309,12 @@ Distributed Query
 Task Count: 1
 Tasks Shown: All
 -> Task
-Node: host=localhost port=57638 dbname=regression
--> Update on lineitem_102009
--> Bitmap Heap Scan on lineitem_102009
+Node: host=localhost port=57637 dbname=regression
+-> Update on lineitem_290000
+-> Bitmap Heap Scan on lineitem_290000
 Recheck Cond: (l_orderkey = 1)
 Filter: (l_partkey = 0)
--> Bitmap Index Scan on lineitem_pkey_102009
+-> Bitmap Index Scan on lineitem_pkey_290000
 Index Cond: (l_orderkey = 1)
 -- Test delete
 EXPLAIN (COSTS FALSE)
@@ -323,25 +325,25 @@ Distributed Query
 Task Count: 1
 Tasks Shown: All
 -> Task
-Node: host=localhost port=57638 dbname=regression
--> Delete on lineitem_102009
--> Bitmap Heap Scan on lineitem_102009
+Node: host=localhost port=57637 dbname=regression
+-> Delete on lineitem_290000
+-> Bitmap Heap Scan on lineitem_290000
 Recheck Cond: (l_orderkey = 1)
 Filter: (l_partkey = 0)
--> Bitmap Index Scan on lineitem_pkey_102009
+-> Bitmap Index Scan on lineitem_pkey_290000
 Index Cond: (l_orderkey = 1)
 -- Test single-shard SELECT
 EXPLAIN (COSTS FALSE)
 SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
-Distributed Query into pg_merge_job_0049
+Distributed Query into pg_merge_job_570009
 Executor: Router
 Task Count: 1
 Tasks Shown: All
 -> Task
 Node: host=localhost port=57637 dbname=regression
--> Bitmap Heap Scan on lineitem_102009 lineitem
+-> Bitmap Heap Scan on lineitem_290000 lineitem
 Recheck Cond: (l_orderkey = 5)
--> Bitmap Index Scan on lineitem_pkey_102009
+-> Bitmap Index Scan on lineitem_pkey_290000
 Index Cond: (l_orderkey = 5)
 SELECT true AS valid FROM explain_xml($$
 SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
@@ -353,41 +355,41 @@ t
 EXPLAIN (COSTS FALSE)
 CREATE TABLE explain_result AS
 SELECT * FROM lineitem;
-Distributed Query into pg_merge_job_0052
+Distributed Query into pg_merge_job_570012
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
 -> Task
 Node: host=localhost port=57637 dbname=regression
--> Seq Scan on lineitem_102010 lineitem
+-> Seq Scan on lineitem_290000 lineitem
 Master Query
--> Seq Scan on pg_merge_job_0052
+-> Seq Scan on pg_merge_job_570012
 -- Test all tasks output
 SET citus.explain_all_tasks TO on;
 EXPLAIN (COSTS FALSE)
 SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
-Distributed Query into pg_merge_job_0053
+Distributed Query into pg_merge_job_570013
 Executor: Real-Time
 Task Count: 3
 Tasks Shown: All
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
--> Seq Scan on lineitem_102012 lineitem
+-> Seq Scan on lineitem_290004 lineitem
 Filter: (l_orderkey > 9030)
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
--> Seq Scan on lineitem_102013 lineitem
+-> Seq Scan on lineitem_290003 lineitem
 Filter: (l_orderkey > 9030)
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
--> Seq Scan on lineitem_102014 lineitem
+-> Seq Scan on lineitem_290005 lineitem
 Filter: (l_orderkey > 9030)
 Master Query
 -> Aggregate
--> Seq Scan on pg_merge_job_0053
+-> Seq Scan on pg_merge_job_570013
 SELECT true AS valid FROM explain_xml($$
 SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
 t
@@ -399,18 +401,18 @@ SET citus.task_executor_type TO 'task-tracker';
 SET citus.explain_all_tasks TO off;
 EXPLAIN (COSTS FALSE)
 SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
-Distributed Query into pg_merge_job_0056
+Distributed Query into pg_merge_job_570016
 Executor: Task-Tracker
 Task Count: 3
 Tasks Shown: One of 3
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
--> Seq Scan on lineitem_102012 lineitem
+-> Seq Scan on lineitem_290004 lineitem
 Filter: (l_orderkey > 9030)
 Master Query
 -> Aggregate
--> Seq Scan on pg_merge_job_0056
+-> Seq Scan on pg_merge_job_570016
 -- Test re-partition join
 SET citus.large_table_shard_count TO 1;
 EXPLAIN (COSTS FALSE)
@@ -419,7 +421,7 @@ EXPLAIN (COSTS FALSE)
 WHERE l_orderkey = o_orderkey
 AND o_custkey = c_custkey
 AND l_suppkey = s_suppkey;
-Distributed Query into pg_merge_job_0059
+Distributed Query into pg_merge_job_570019
 Executor: Task-Tracker
 Task Count: 1
 Tasks Shown: None, not supported for re-partition queries
@@ -431,7 +433,7 @@ Distributed Query into pg_merge_job_0059
 Merge Task Count: 1
 Master Query
 -> Aggregate
--> Seq Scan on pg_merge_job_0059
+-> Seq Scan on pg_merge_job_570019
 EXPLAIN (COSTS FALSE, FORMAT JSON)
 SELECT count(*)
 FROM lineitem, orders, customer, supplier
@@ -466,8 +468,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
 {
 "Node Type": "Seq Scan",
 "Parent Relationship": "Outer",
-"Relation Name": "pg_merge_job_0062",
-"Alias": "pg_merge_job_0062"
+"Relation Name": "pg_merge_job_570022",
+"Alias": "pg_merge_job_570022"
 }
 ]
 }
@@ -516,8 +518,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
 <Plan>
 <Node-Type>Seq Scan</Node-Type>
 <Parent-Relationship>Outer</Parent-Relationship>
-<Relation-Name>pg_merge_job_0068</Relation-Name>
-<Alias>pg_merge_job_0068</Alias>
+<Relation-Name>pg_merge_job_570028</Relation-Name>
+<Alias>pg_merge_job_570028</Alias>
 </Plan>
 </Plans>
 </Plan>
@@ -555,5 +557,5 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
 Plans:
 - Node Type: "Seq Scan"
 Parent Relationship: "Outer"
-Relation Name: "pg_merge_job_0074"
-Alias: "pg_merge_job_0074"
+Relation Name: "pg_merge_job_570034"
+Alias: "pg_merge_job_570034"
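Every EXPLAIN hunk above follows one pattern: the merge-job and intermediate-column numbers are derived from the jobId, so pinning pg_dist_jobid_seq at 570000 turns pg_merge_job_0040 into pg_merge_job_570000, intermediate_column_40_* into intermediate_column_570000_*, and so on through the file. The outputs come from statements of this shape (the \a\t and GUC settings are copied from the first hunk):

\a\t
SET citus.task_executor_type TO 'real-time';
SET citus.explain_distributed_queries TO on;
EXPLAIN (COSTS FALSE, FORMAT TEXT)
    SELECT l_quantity, count(*) count_quantity FROM lineitem
    GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
-- first output line is now "Distributed Query into pg_merge_job_570000"

The next diff is the alternative expected-output file for the same test; it differs only in minor planner output details (sort-key direction, numeric literal quoting).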
@@ -1,6 +1,8 @@
 --
 -- MULTI_EXPLAIN
 --
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 570000;
 \a\t
 SET citus.task_executor_type TO 'real-time';
 SET citus.explain_distributed_queries TO on;
@@ -30,7 +32,7 @@ $BODY$ LANGUAGE plpgsql;
 EXPLAIN (COSTS FALSE, FORMAT TEXT)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
-Distributed Query into pg_merge_job_0040
+Distributed Query into pg_merge_job_570000
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
@@ -38,13 +40,13 @@ Distributed Query into pg_merge_job_0040
 Node: host=localhost port=57637 dbname=regression
 -> HashAggregate
 Group Key: l_quantity
--> Seq Scan on lineitem_102010 lineitem
+-> Seq Scan on lineitem_290000 lineitem
 Master Query
 -> Sort
-Sort Key: (sum(((sum(intermediate_column_40_1))::bigint)))::bigint, intermediate_column_40_0
+Sort Key: (sum(((sum(intermediate_column_570000_1))::bigint)))::bigint, intermediate_column_570000_0
 -> HashAggregate
-Group Key: intermediate_column_40_0
--> Seq Scan on pg_merge_job_0040
+Group Key: intermediate_column_570000_0
+-> Seq Scan on pg_merge_job_570000
 -- Test JSON format
 EXPLAIN (COSTS FALSE, FORMAT JSON)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
@@ -69,7 +71,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
 {
 "Node Type": "Seq Scan",
 "Parent Relationship": "Outer",
-"Relation Name": "lineitem_102010",
+"Relation Name": "lineitem_290000",
 "Alias": "lineitem"
 }
 ]
@@ -85,19 +87,19 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
 {
 "Plan": {
 "Node Type": "Sort",
-"Sort Key": ["(sum(((sum(intermediate_column_41_1))::bigint)))::bigint", "intermediate_column_41_0"],
+"Sort Key": ["(sum(((sum(intermediate_column_570001_1))::bigint)))::bigint", "intermediate_column_570001_0"],
 "Plans": [
 {
 "Node Type": "Aggregate",
 "Strategy": "Hashed",
 "Parent Relationship": "Outer",
-"Group Key": ["intermediate_column_41_0"],
+"Group Key": ["intermediate_column_570001_0"],
 "Plans": [
 {
 "Node Type": "Seq Scan",
 "Parent Relationship": "Outer",
-"Relation Name": "pg_merge_job_0041",
-"Alias": "pg_merge_job_0041"
+"Relation Name": "pg_merge_job_570001",
+"Alias": "pg_merge_job_570001"
 }
 ]
 }
@@ -138,7 +140,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
 <Plan>
 <Node-Type>Seq Scan</Node-Type>
 <Parent-Relationship>Outer</Parent-Relationship>
-<Relation-Name>lineitem_102010</Relation-Name>
+<Relation-Name>lineitem_290000</Relation-Name>
 <Alias>lineitem</Alias>
 </Plan>
 </Plans>
@@ -154,8 +156,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
 <Plan>
 <Node-Type>Sort</Node-Type>
 <Sort-Key>
-<Item>(sum(((sum(intermediate_column_43_1))::bigint)))::bigint</Item>
-<Item>intermediate_column_43_0</Item>
+<Item>(sum(((sum(intermediate_column_570003_1))::bigint)))::bigint</Item>
+<Item>intermediate_column_570003_0</Item>
 </Sort-Key>
 <Plans>
 <Plan>
@@ -163,14 +165,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
 <Strategy>Hashed</Strategy>
 <Parent-Relationship>Outer</Parent-Relationship>
 <Group-Key>
-<Item>intermediate_column_43_0</Item>
+<Item>intermediate_column_570003_0</Item>
 </Group-Key>
 <Plans>
 <Plan>
 <Node-Type>Seq Scan</Node-Type>
 <Parent-Relationship>Outer</Parent-Relationship>
-<Relation-Name>pg_merge_job_0043</Relation-Name>
-<Alias>pg_merge_job_0043</Alias>
+<Relation-Name>pg_merge_job_570003</Relation-Name>
+<Alias>pg_merge_job_570003</Alias>
 </Plan>
 </Plans>
 </Plan>
@@ -204,31 +206,31 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
 Plans:
 - Node Type: "Seq Scan"
 Parent Relationship: "Outer"
-Relation Name: "lineitem_102010"
+Relation Name: "lineitem_290000"
 Alias: "lineitem"
 
 Master Query:
 - Plan:
 Node Type: "Sort"
 Sort Key:
-- "(sum(((sum(intermediate_column_45_1))::bigint)))::bigint"
-- "intermediate_column_45_0"
+- "(sum(((sum(intermediate_column_570005_1))::bigint)))::bigint"
+- "intermediate_column_570005_0"
 Plans:
 - Node Type: "Aggregate"
 Strategy: "Hashed"
 Parent Relationship: "Outer"
 Group Key:
-- "intermediate_column_45_0"
+- "intermediate_column_570005_0"
 Plans:
 - Node Type: "Seq Scan"
 Parent Relationship: "Outer"
-Relation Name: "pg_merge_job_0045"
-Alias: "pg_merge_job_0045"
+Relation Name: "pg_merge_job_570005"
+Alias: "pg_merge_job_570005"
 -- Test Text format
 EXPLAIN (COSTS FALSE, FORMAT TEXT)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
-Distributed Query into pg_merge_job_0046
+Distributed Query into pg_merge_job_570006
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
@@ -236,17 +238,17 @@ Distributed Query into pg_merge_job_0046
 Node: host=localhost port=57637 dbname=regression
 -> HashAggregate
 Group Key: l_quantity
--> Seq Scan on lineitem_102010 lineitem
+-> Seq Scan on lineitem_290000 lineitem
 Master Query
 -> Sort
-Sort Key: (sum(((sum(intermediate_column_46_1))::bigint)))::bigint, intermediate_column_46_0
+Sort Key: (sum(((sum(intermediate_column_570006_1))::bigint)))::bigint, intermediate_column_570006_0
 -> HashAggregate
-Group Key: intermediate_column_46_0
--> Seq Scan on pg_merge_job_0046
+Group Key: intermediate_column_570006_0
+-> Seq Scan on pg_merge_job_570006
 -- Test verbose
 EXPLAIN (COSTS FALSE, VERBOSE TRUE)
 SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
-Distributed Query into pg_merge_job_0047
+Distributed Query into pg_merge_job_570007
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
@@ -254,19 +256,19 @@ Distributed Query into pg_merge_job_0047
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
--> Seq Scan on public.lineitem_102010 lineitem
+-> Seq Scan on public.lineitem_290000 lineitem
 Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
 Master Query
 -> Aggregate
-Output: (sum(intermediate_column_47_0) / (sum(intermediate_column_47_1) / sum(intermediate_column_47_2)))
--> Seq Scan on pg_temp_2.pg_merge_job_0047
-Output: intermediate_column_47_0, intermediate_column_47_1, intermediate_column_47_2
+Output: (sum(intermediate_column_570007_0) / (sum(intermediate_column_570007_1) / sum(intermediate_column_570007_2)))
+-> Seq Scan on pg_temp_2.pg_merge_job_570007
+Output: intermediate_column_570007_0, intermediate_column_570007_1, intermediate_column_570007_2
 -- Test join
 EXPLAIN (COSTS FALSE)
 SELECT * FROM lineitem
 JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5
 ORDER BY l_quantity DESC LIMIT 10;
-Distributed Query into pg_merge_job_0048
+Distributed Query into pg_merge_job_570008
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
@@ -277,15 +279,15 @@ Distributed Query into pg_merge_job_0048
 Sort Key: lineitem.l_quantity
 -> Hash Join
 Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Seq Scan on lineitem_102010 lineitem
+-> Seq Scan on lineitem_290000 lineitem
 Filter: (l_quantity < 5::numeric)
 -> Hash
--> Seq Scan on orders_102015 orders
+-> Seq Scan on orders_290006 orders
 Master Query
 -> Limit
 -> Sort
-Sort Key: intermediate_column_48_4
--> Seq Scan on pg_merge_job_0048
+Sort Key: intermediate_column_570008_4
+-> Seq Scan on pg_merge_job_570008
 -- Test insert
 EXPLAIN (COSTS FALSE)
 INSERT INTO lineitem VALUES(1,0);
@@ -294,8 +296,8 @@ Distributed Query
 Task Count: 1
 Tasks Shown: All
 -> Task
-Node: host=localhost port=57638 dbname=regression
--> Insert on lineitem_102009
+Node: host=localhost port=57637 dbname=regression
+-> Insert on lineitem_290000
 -> Result
 -- Test update
 EXPLAIN (COSTS FALSE)
@@ -307,12 +309,12 @@ Distributed Query
 Task Count: 1
 Tasks Shown: All
 -> Task
-Node: host=localhost port=57638 dbname=regression
--> Update on lineitem_102009
--> Bitmap Heap Scan on lineitem_102009
+Node: host=localhost port=57637 dbname=regression
+-> Update on lineitem_290000
+-> Bitmap Heap Scan on lineitem_290000
 Recheck Cond: (l_orderkey = 1)
 Filter: (l_partkey = 0)
--> Bitmap Index Scan on lineitem_pkey_102009
+-> Bitmap Index Scan on lineitem_pkey_290000
 Index Cond: (l_orderkey = 1)
 -- Test delete
 EXPLAIN (COSTS FALSE)
@@ -323,25 +325,25 @@ Distributed Query
 Task Count: 1
 Tasks Shown: All
 -> Task
-Node: host=localhost port=57638 dbname=regression
--> Delete on lineitem_102009
--> Bitmap Heap Scan on lineitem_102009
+Node: host=localhost port=57637 dbname=regression
+-> Delete on lineitem_290000
+-> Bitmap Heap Scan on lineitem_290000
 Recheck Cond: (l_orderkey = 1)
 Filter: (l_partkey = 0)
--> Bitmap Index Scan on lineitem_pkey_102009
+-> Bitmap Index Scan on lineitem_pkey_290000
 Index Cond: (l_orderkey = 1)
 -- Test single-shard SELECT
 EXPLAIN (COSTS FALSE)
 SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
-Distributed Query into pg_merge_job_0049
+Distributed Query into pg_merge_job_570009
 Executor: Router
 Task Count: 1
 Tasks Shown: All
 -> Task
 Node: host=localhost port=57637 dbname=regression
--> Bitmap Heap Scan on lineitem_102009 lineitem
+-> Bitmap Heap Scan on lineitem_290000 lineitem
 Recheck Cond: (l_orderkey = 5)
--> Bitmap Index Scan on lineitem_pkey_102009
+-> Bitmap Index Scan on lineitem_pkey_290000
 Index Cond: (l_orderkey = 5)
 SELECT true AS valid FROM explain_xml($$
 SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
@@ -353,41 +355,41 @@ t
 EXPLAIN (COSTS FALSE)
 CREATE TABLE explain_result AS
 SELECT * FROM lineitem;
-Distributed Query into pg_merge_job_0052
+Distributed Query into pg_merge_job_570012
 Executor: Real-Time
 Task Count: 6
 Tasks Shown: One of 6
 -> Task
 Node: host=localhost port=57637 dbname=regression
--> Seq Scan on lineitem_102010 lineitem
+-> Seq Scan on lineitem_290000 lineitem
 Master Query
--> Seq Scan on pg_merge_job_0052
+-> Seq Scan on pg_merge_job_570012
 -- Test all tasks output
 SET citus.explain_all_tasks TO on;
 EXPLAIN (COSTS FALSE)
 SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
-Distributed Query into pg_merge_job_0053
+Distributed Query into pg_merge_job_570013
 Executor: Real-Time
 Task Count: 3
 Tasks Shown: All
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
--> Seq Scan on lineitem_102012 lineitem
+-> Seq Scan on lineitem_290004 lineitem
 Filter: (l_orderkey > 9030)
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
--> Seq Scan on lineitem_102013 lineitem
+-> Seq Scan on lineitem_290003 lineitem
 Filter: (l_orderkey > 9030)
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
--> Seq Scan on lineitem_102014 lineitem
+-> Seq Scan on lineitem_290005 lineitem
 Filter: (l_orderkey > 9030)
 Master Query
 -> Aggregate
--> Seq Scan on pg_merge_job_0053
+-> Seq Scan on pg_merge_job_570013
 SELECT true AS valid FROM explain_xml($$
 SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
 t
@@ -399,18 +401,18 @@ SET citus.task_executor_type TO 'task-tracker';
 SET citus.explain_all_tasks TO off;
 EXPLAIN (COSTS FALSE)
 SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
-Distributed Query into pg_merge_job_0056
+Distributed Query into pg_merge_job_570016
 Executor: Task-Tracker
 Task Count: 3
 Tasks Shown: One of 3
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
--> Seq Scan on lineitem_102012 lineitem
+-> Seq Scan on lineitem_290004 lineitem
 Filter: (l_orderkey > 9030)
 Master Query
 -> Aggregate
--> Seq Scan on pg_merge_job_0056
+-> Seq Scan on pg_merge_job_570016
 -- Test re-partition join
 SET citus.large_table_shard_count TO 1;
 EXPLAIN (COSTS FALSE)
@@ -419,7 +421,7 @@ EXPLAIN (COSTS FALSE)
 WHERE l_orderkey = o_orderkey
 AND o_custkey = c_custkey
 AND l_suppkey = s_suppkey;
-Distributed Query into pg_merge_job_0059
+Distributed Query into pg_merge_job_570019
 Executor: Task-Tracker
 Task Count: 1
 Tasks Shown: None, not supported for re-partition queries
@@ -431,7 +433,7 @@ Distributed Query into pg_merge_job_0059
 Merge Task Count: 1
 Master Query
 -> Aggregate
--> Seq Scan on pg_merge_job_0059
+-> Seq Scan on pg_merge_job_570019
 EXPLAIN (COSTS FALSE, FORMAT JSON)
 SELECT count(*)
 FROM lineitem, orders, customer, supplier
@@ -466,8 +468,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
 {
 "Node Type": "Seq Scan",
 "Parent Relationship": "Outer",
-"Relation Name": "pg_merge_job_0062",
-"Alias": "pg_merge_job_0062"
+"Relation Name": "pg_merge_job_570022",
+"Alias": "pg_merge_job_570022"
 }
 ]
 }
@@ -516,8 +518,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
 <Plan>
 <Node-Type>Seq Scan</Node-Type>
 <Parent-Relationship>Outer</Parent-Relationship>
-<Relation-Name>pg_merge_job_0068</Relation-Name>
-<Alias>pg_merge_job_0068</Alias>
+<Relation-Name>pg_merge_job_570028</Relation-Name>
+<Alias>pg_merge_job_570028</Alias>
 </Plan>
 </Plans>
 </Plan>
@@ -555,5 +557,5 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
 Plans:
 - Node Type: "Seq Scan"
 Parent Relationship: "Outer"
-Relation Name: "pg_merge_job_0074"
-Alias: "pg_merge_job_0074"
+Relation Name: "pg_merge_job_570034"
+Alias: "pg_merge_job_570034"
@@ -5,6 +5,8 @@
 --
 -- It'd be nice to script generation of this file, but alas, that's
 -- not done yet.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 580000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 580000;
 -- DROP EXTENSION pre-created by the regression suite
 DROP EXTENSION citus;
 \c
@@ -1,6 +1,8 @@
 --
 -- MULTI_FDW_CREATE_TABLE
 --
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 590000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 590000;
 -- Create new table definitions for use in testing in distributed foreign data
 -- wrapper functionality.
 SELECT fdwname FROM pg_foreign_data_wrapper;
@@ -2,6 +2,8 @@
 -- MULTI_FDW_MASTER_PROTOCOL
 --
 -- Tests that check the metadata returned by the master node.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 600000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 600000;
 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
 part_placement_policy FROM master_get_table_metadata('lineitem');
  part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy
@@ -20,7 +22,7 @@ SELECT * FROM master_get_table_ddl_events('lineitem');
 SELECT * FROM master_get_new_shardid();
  master_get_new_shardid
 ------------------------
- 102008
+ 600000
 (1 row)
 
 SELECT node_name FROM master_get_local_first_candidate_nodes();
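The master_get_new_shardid() hunk is the clearest demonstration that shard IDs are drawn straight from pg_dist_shardid_seq: after the RESTART, the very next call returns the base value itself.

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 600000;
SELECT * FROM master_get_new_shardid();
-- returns 600000 (previously 102008, i.e. wherever the sequence
-- happened to stand after earlier tests)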
@@ -1,3 +1,5 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 610000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 610000;
 -- ===================================================================
 -- create test functions
 -- ===================================================================
@@ -2,6 +2,8 @@
 -- MULTI_HASH_PRUNING
 --
 -- Tests for shard and join pruning logic on hash partitioned tables.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
 -- Create a table partitioned on integer column and update partition type to
 -- hash. Then stage data to this table and update shard min max values with
 -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
@@ -47,9 +49,9 @@ SELECT count(*) FROM orders_hash_partitioned;
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
 DEBUG: Creating router plan
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
 DEBUG: Plan is router executable
  count
 -------
@@ -58,9 +60,9 @@ DEBUG: Plan is router executable
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2;
 DEBUG: Creating router plan
-DEBUG: predicate pruning for shardId 102033
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
+DEBUG: predicate pruning for shardId 630000
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
 DEBUG: Plan is router executable
  count
 -------
@@ -69,9 +71,9 @@ DEBUG: Plan is router executable
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3;
 DEBUG: Creating router plan
-DEBUG: predicate pruning for shardId 102033
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630000
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
 DEBUG: Plan is router executable
  count
 -------
@@ -80,9 +82,9 @@ DEBUG: Plan is router executable
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4;
 DEBUG: Creating router plan
-DEBUG: predicate pruning for shardId 102033
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630000
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
 DEBUG: Plan is router executable
  count
 -------
@@ -92,9 +94,9 @@ DEBUG: Plan is router executable
 SELECT count(*) FROM orders_hash_partitioned
 WHERE o_orderkey = 1 AND o_clerk = 'aaa';
 DEBUG: Creating router plan
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
 DEBUG: Plan is router executable
  count
 -------
@@ -103,9 +105,9 @@ DEBUG: Plan is router executable
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);
 DEBUG: Creating router plan
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
 DEBUG: Plan is router executable
  count
 -------
@@ -120,36 +122,36 @@ SELECT count(*) FROM orders_hash_partitioned;
 (1 row)
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
 (1 row)
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2;
-DEBUG: predicate pruning for shardId 102033
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
+DEBUG: predicate pruning for shardId 630000
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
  count
 -------
      0
 (1 row)
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3;
-DEBUG: predicate pruning for shardId 102033
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630000
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
 (1 row)
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4;
-DEBUG: predicate pruning for shardId 102033
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630000
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
@@ -157,18 +159,18 @@ DEBUG: predicate pruning for shardId 102036
 
 SELECT count(*) FROM orders_hash_partitioned
 WHERE o_orderkey = 1 AND o_clerk = 'aaa';
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
 (1 row)
 
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
@@ -176,9 +178,9 @@ DEBUG: predicate pruning for shardId 102036
 
 SET citus.task_executor_type TO :actual_task_executor;
 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL;
-DEBUG: predicate pruning for shardId 102033
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630000
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
@@ -198,8 +200,8 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2;
 
 SELECT count(*) FROM orders_hash_partitioned
 WHERE o_orderkey = 1 OR o_orderkey = 2;
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
  count
 -------
      0
@@ -214,8 +216,8 @@ SELECT count(*) FROM orders_hash_partitioned
 
 SELECT count(*) FROM orders_hash_partitioned
 WHERE o_orderkey = 1 OR (o_orderkey = 3 AND o_clerk = 'aaa');
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
@@ -223,8 +225,8 @@ DEBUG: predicate pruning for shardId 102036
 
 SELECT count(*) FROM orders_hash_partitioned
 WHERE o_orderkey = 1 OR o_orderkey is NULL;
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
@@ -232,9 +234,9 @@ DEBUG: predicate pruning for shardId 102036
 
 SELECT count(*) FROM
 (SELECT o_orderkey FROM orders_hash_partitioned WHERE o_orderkey = 1) AS orderkeys;
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
@@ -285,9 +287,9 @@ SELECT count(*) FROM orders_hash_partitioned
 
 SELECT count(*) FROM orders_hash_partitioned
 WHERE o_orderkey = random() AND o_orderkey = 1;
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
  count
 -------
      0
@@ -319,12 +321,12 @@ SELECT count(*)
 WHERE orders1.o_orderkey = orders2.o_orderkey
 AND orders1.o_orderkey = 1
 AND orders2.o_orderkey is NULL;
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102035
-DEBUG: predicate pruning for shardId 102036
-DEBUG: predicate pruning for shardId 102033
-DEBUG: predicate pruning for shardId 102034
-DEBUG: predicate pruning for shardId 102036
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630002
+DEBUG: predicate pruning for shardId 630003
+DEBUG: predicate pruning for shardId 630000
+DEBUG: predicate pruning for shardId 630001
+DEBUG: predicate pruning for shardId 630003
 DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
  count
 -------
@ -3,6 +3,8 @@
--
-- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
-- tables.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 640000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 640000;
--
-- CREATE TEST TABLES
--
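These two ALTER SEQUENCE statements are the core of this change: each expected file now restarts pg_catalog.pg_dist_shardid_seq and pg_catalog.pg_dist_jobid_seq at a file-specific value, so the shard and job ids appearing later in the output are deterministic regardless of test ordering. The effect can be seen directly; a minimal sketch (the MULTI_MASTER_PROTOCOL hunk further down shows the same pattern with 740000):

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 640000;
SELECT * FROM master_get_new_shardid();
-- returns 640000: the next shard created by this test file gets a known id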
@ -137,11 +139,11 @@ ERROR: creating unique indexes on append-partitioned tables is currently unsupp
CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey);
ERROR: relation "lineitem_orderkey_index" already exists
CREATE INDEX try_index ON lineitem USING gist (l_orderkey);
WARNING: Bad result from localhost:57637
WARNING: Bad result from localhost:57638
DETAIL: Remote message: data type bigint has no default operator class for access method "gist"
ERROR: could not execute DDL command on worker node shards
CREATE INDEX try_index ON lineitem (non_existent_column);
WARNING: Bad result from localhost:57637
WARNING: Bad result from localhost:57638
DETAIL: Remote message: column "non_existent_column" does not exist
ERROR: could not execute DDL command on worker node shards
CREATE INDEX ON lineitem (l_orderkey);
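The gist failure above is a PostgreSQL limitation relayed from the workers rather than a Citus error: gist has no default operator class for bigint. A hedged illustration on a plain, non-distributed table (hypothetical names):

CREATE TABLE gist_example (id bigint, loc point);
-- succeeds: the point type ships with a default gist operator class
CREATE INDEX gist_example_loc_idx ON gist_example USING gist (loc);
-- would fail with the same message the workers relayed above:
-- CREATE INDEX gist_example_id_idx ON gist_example USING gist (id);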
@ -1,6 +1,8 @@
--
-- MULTI_JOIN_ORDER_ADDITIONAL
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 650000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 650000;
-- Set configuration to print table join order and pruned shards
SET citus.explain_distributed_queries TO off;
SET citus.log_multi_join_order TO TRUE;
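These two settings trade full distributed EXPLAIN plans for a compact join-order log line, which is what this test asserts on. A sketch of the intended usage (the logged text is illustrative, not quoted from this diff):

SET citus.explain_distributed_queries TO off;
SET citus.log_multi_join_order TO TRUE;
SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey;
-- the planner logs its chosen join strategy (e.g. a local partition join
-- between range-aligned lineitem and orders shards) before executing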
@ -40,8 +42,8 @@ SELECT master_create_worker_shards('lineitem_hash', 2, 1);
(1 row)

CREATE INDEX lineitem_hash_time_index ON lineitem_hash (l_shipdate);
DEBUG: applied command on shard 102037 on node localhost:57637
DEBUG: applied command on shard 102038 on node localhost:57638
DEBUG: applied command on shard 650000 on node localhost:57637
DEBUG: applied command on shard 650001 on node localhost:57638
DEBUG: building index "lineitem_hash_time_index" on table "lineitem_hash"
CREATE TABLE orders_hash (
o_orderkey bigint not null,
@ -1,6 +1,8 @@
--
-- MULTI_JOIN_ORDER_TPCH_LARGE
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 660000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 660000;
-- Enable configuration to print table join order
SET citus.explain_distributed_queries TO off;
SET citus.log_multi_join_order TO TRUE;
@ -1,6 +1,7 @@
--
-- MULTI_JOIN_PRUNING
--
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 680000;
-- Check that join-pruning works for joins between two large relations. For now
-- we only check for join-pruning between locally partitioned relations. In the
-- future we want to check for pruning between re-partitioned relations as well.
@ -23,9 +24,9 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986]
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey AND l_orderkey > 9030;
DEBUG: predicate pruning for shardId 102009
DEBUG: predicate pruning for shardId 102010
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
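The "join prunable for intervals" lines are interval arithmetic over pg_dist_shard statistics: a lineitem shard and an orders shard need a join task only if their [shardminvalue, shardmaxvalue] ranges on the join column overlap. A worked check for the first pruned pair above:

-- overlaps([min1,max1], [min2,max2]) := min1 <= max2 AND min2 <= max1
SELECT 8997 <= 5986 AND 1 <= 11554 AS intervals_overlap;  -- false, so prunable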
@ -38,12 +39,12 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986]
-- works as expected in this case.
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey AND l_orderkey > 20000;
DEBUG: predicate pruning for shardId 102009
DEBUG: predicate pruning for shardId 102010
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 102012
DEBUG: predicate pruning for shardId 102013
DEBUG: predicate pruning for shardId 102014
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
sum | avg
-----+-----
     |
@ -54,10 +55,10 @@ DEBUG: predicate pruning for shardId 102014
-- out all the shards, and leave us with an empty task list.
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey AND l_orderkey > 6000 AND o_orderkey < 6000;
DEBUG: predicate pruning for shardId 102009
DEBUG: predicate pruning for shardId 102010
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 102016
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290007
DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
@ -5,6 +5,8 @@
-- transaction block here so that we don't emit debug messages with changing
-- transaction ids in them. Also, we set the executor type to task tracker
-- executor here, as we cannot run repartition jobs with real time executor.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 690000;
BEGIN;
SET client_min_messages TO DEBUG4;
DEBUG: CommitTransactionCommand
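Repartition joins like the ones below only run under the task-tracker executor; the toggle the tests rely on appears verbatim in a later file and is simply:

SET citus.task_executor_type TO 'task-tracker';
-- and back for the router/real-time tests:
SET citus.task_executor_type TO 'real-time';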
@ -52,29 +54,29 @@ DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: generated sql query for job 1250 and task 3
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102009 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DEBUG: generated sql query for job 1250 and task 6
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102010 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DEBUG: generated sql query for job 1250 and task 9
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102011 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DEBUG: generated sql query for job 1250 and task 12
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102012 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DEBUG: generated sql query for job 1250 and task 15
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102013 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DEBUG: generated sql query for job 1250 and task 18
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102014 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 15 to node localhost:57638
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)"
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 15 to node localhost:57637
DEBUG: assigned task 18 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for job 1251 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_102019 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_290010 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for job 1251 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_102044 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_280002 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 4
@ -88,11 +90,11 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for job 1252 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_102017 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1, "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 LIMIT '30'::bigint"
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290008 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1, "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 LIMIT '30'::bigint"
DEBUG: generated sql query for job 1252 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_102043 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1, "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 LIMIT '30'::bigint"
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1, "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 LIMIT '30'::bigint"
DEBUG: generated sql query for job 1252 and task 9
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_102042 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1, "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 LIMIT '30'::bigint"
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1, "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 LIMIT '30'::bigint"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 4
@ -100,8 +102,8 @@ DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 13
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: completed cleanup query for job 1252 on node "localhost:57638"
DEBUG: completed cleanup query for job 1252 on node "localhost:57637"
DEBUG: completed cleanup query for job 1251 on node "localhost:57638"
@ -156,29 +158,29 @@ ORDER BY
l_partkey, o_orderkey;
DEBUG: StartTransactionCommand
DEBUG: generated sql query for job 1253 and task 2
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102009 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 4
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102010 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 6
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102011 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 8
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102012 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 10
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102013 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 12
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102014 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 10 to node localhost:57638
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 8 to node localhost:57638
DEBUG: assigned task 10 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: generated sql query for job 1254 and task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_102015 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290006 orders WHERE (o_totalprice <> 4.0)"
DEBUG: generated sql query for job 1254 and task 4
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_102016 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290007 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@ -5,6 +5,8 @@
-- transaction block here so that we don't emit debug messages with changing
-- transaction ids in them. Also, we set the executor type to task tracker
-- executor here, as we cannot run repartition jobs with real time executor.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 690000;
BEGIN;
SET client_min_messages TO DEBUG4;
DEBUG: CommitTransactionCommand
@ -52,29 +54,29 @@ DEBUG: join prunable for intervals [8997,11554] and [1,5986]
DEBUG: join prunable for intervals [11554,13920] and [1,5986]
DEBUG: join prunable for intervals [13921,14947] and [1,5986]
DEBUG: generated sql query for job 1250 and task 3
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102009 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DEBUG: generated sql query for job 1250 and task 6
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102010 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DEBUG: generated sql query for job 1250 and task 9
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102011 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DEBUG: generated sql query for job 1250 and task 12
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102012 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DEBUG: generated sql query for job 1250 and task 15
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102013 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DEBUG: generated sql query for job 1250 and task 18
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102014 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 15 to node localhost:57638
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)"
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 15 to node localhost:57637
DEBUG: assigned task 18 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for job 1251 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_102019 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_290010 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for job 1251 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_102044 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_280002 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 4
@ -88,11 +90,11 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for job 1252 and task 3
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_102017 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1, "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 LIMIT 30::bigint"
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290008 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1, "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 LIMIT 30::bigint"
DEBUG: generated sql query for job 1252 and task 6
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_102043 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1, "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 LIMIT 30::bigint"
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1, "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 LIMIT 30::bigint"
DEBUG: generated sql query for job 1252 and task 9
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_102042 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1, "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 LIMIT 30::bigint"
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1, "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 LIMIT 30::bigint"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 4
@ -100,8 +102,8 @@ DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 13
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: completed cleanup query for job 1252 on node "localhost:57638"
DEBUG: completed cleanup query for job 1252 on node "localhost:57637"
DEBUG: completed cleanup query for job 1251 on node "localhost:57638"
@ -156,29 +158,29 @@ ORDER BY
l_partkey, o_orderkey;
DEBUG: StartTransactionCommand
DEBUG: generated sql query for job 1253 and task 2
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102009 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 4
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102010 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 6
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102011 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 8
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102012 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 10
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102013 lineitem WHERE (l_quantity < 5.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 12
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102014 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 10 to node localhost:57638
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 8 to node localhost:57638
DEBUG: assigned task 10 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: generated sql query for job 1254 and task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_102015 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290006 orders WHERE (o_totalprice <> 4.0)"
DEBUG: generated sql query for job 1254 and task 4
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_102016 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290007 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@ -4,6 +4,8 @@
-- Tests covering partition and join-pruning for large table joins. Note that we
-- set executor type to task tracker executor here, as we cannot run repartition
-- jobs with real time executor.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 700000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 700000;
SET citus.large_table_shard_count TO 2;
SET client_min_messages TO DEBUG2;
SET citus.task_executor_type TO 'task-tracker';
@ -40,8 +42,8 @@ FROM
WHERE
o_custkey = c_custkey AND
o_orderkey < 0;
DEBUG: predicate pruning for shardId 102015
DEBUG: predicate pruning for shardId 102016
DEBUG: predicate pruning for shardId 290006
DEBUG: predicate pruning for shardId 290007
count
-------
@ -56,9 +58,9 @@ FROM
WHERE
o_custkey = c_custkey AND
c_custkey < 0;
DEBUG: predicate pruning for shardId 102017
DEBUG: predicate pruning for shardId 102043
DEBUG: predicate pruning for shardId 102042
DEBUG: predicate pruning for shardId 290008
DEBUG: predicate pruning for shardId 280001
DEBUG: predicate pruning for shardId 280000
count
-------
@ -115,12 +117,12 @@ FROM
WHERE
l_partkey = c_nationkey AND
l_orderkey < 0;
DEBUG: predicate pruning for shardId 102009
DEBUG: predicate pruning for shardId 102010
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 102012
DEBUG: predicate pruning for shardId 102013
DEBUG: predicate pruning for shardId 102014
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
count
-------
@ -5,6 +5,8 @@
-- and dual hash repartition joins. The tests also cover task assignment propagation
-- from a sql task to its depended tasks. Note that we set the executor type to task
-- tracker executor here, as we cannot run repartition jobs with real time executor.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 710000;
BEGIN;
SET client_min_messages TO DEBUG3;
DEBUG: CommitTransactionCommand
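The swapped worker assignments in the hunks that follow all share one shape: the same tasks go to the same pair of nodes, but which node is tried first flips. A plausible reading (not stated in this diff) is that the first-replica choice rotates based on the now-explicit job ids, so pinning the jobid sequence shifts the starting offset:

-- with two placements per shard, assignment alternates the first placement
-- across task ids; a different starting offset swaps each pair, e.g.
--   before: task 4 -> 57637, task 2 -> 57638
--   after:  task 2 -> 57637, task 4 -> 57638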
@ -25,8 +27,8 @@ FROM
WHERE
o_custkey = c_custkey;
DEBUG: StartTransactionCommand
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]
@ -40,8 +42,8 @@ DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 11
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: CommitTransactionCommand
count
-------
@ -65,11 +67,11 @@ WHERE
o_orderkey = l_orderkey;
DEBUG: StartTransactionCommand
DEBUG: assigned task 15 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: join prunable for intervals [1,2496] and [2497,4964]
DEBUG: join prunable for intervals [1,2496] and [4965,5986]
DEBUG: join prunable for intervals [1,2496] and [8997,11554]
@ -114,10 +116,10 @@ DEBUG: pruning merge fetch taskId 19
DETAIL: Creating dependency on merge taskId 47
DEBUG: pruning merge fetch taskId 22
DETAIL: Creating dependency on merge taskId 54
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 24 to node localhost:57638
DEBUG: propagating assignment from merge task 40 to constrained sql task 15
@ -154,15 +156,15 @@ FROM
WHERE
l_partkey = c_nationkey;
DEBUG: StartTransactionCommand
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 10 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 8 to node localhost:57638
DEBUG: assigned task 10 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@ -1,6 +1,8 @@
--
-- MULTI_LIMIT_CLAUSE
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 730000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 730000;
-- Display debug messages on limit clause push down.
SET client_min_messages TO DEBUG1;
-- Check that we can correctly handle the Limit clause in distributed queries.
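At DEBUG1 the limit tests can assert that the LIMIT was pushed into the per-shard queries, so each worker returns at most that many rows before the master re-applies the limit. A minimal sketch (debug text paraphrased, not quoted from this diff):

SET client_min_messages TO DEBUG1;
SELECT l_orderkey FROM lineitem ORDER BY l_orderkey LIMIT 5;
-- a DEBUG message reports the pushed-down limit count; each shard query then
-- carries its own ORDER BY ... LIMIT 5 before the final merge on the master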
@ -1,6 +1,8 @@
--
-- MULTI_LIMIT_CLAUSE_APPROXIMATE
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 720000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 720000;
-- Display debug messages on limit clause push down.
SET client_min_messages TO DEBUG1;
-- We first look at results with limit optimization disabled. This first query
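With the optimization disabled, workers compute exact per-shard results before the master applies the LIMIT; the approximate mode instead caps how many rows each task fetches. Assuming the GUC this test toggles is citus.limit_clause_row_fetch_count (named from the Citus settings reference, not from this diff), a sketch:

SET citus.limit_clause_row_fetch_count TO -1;   -- exact: no per-task cutoff
SET citus.limit_clause_row_fetch_count TO 600;  -- approximate: at most 600 rows per task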
@ -2,6 +2,8 @@
-- MULTI_MASTER_PROTOCOL
--
-- Tests that check the metadata returned by the master node.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 740000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 740000;
SELECT part_storage_type, part_key, part_replica_count, part_max_size,
part_placement_policy FROM master_get_table_metadata('lineitem');
part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy
@ -20,7 +22,7 @@ SELECT * FROM master_get_table_ddl_events('lineitem');
SELECT * FROM master_get_new_shardid();
master_get_new_shardid
------------------------
102008
740000
(1 row)

SELECT * FROM master_get_local_first_candidate_nodes();
@ -1,3 +1,5 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 750000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 750000;
-- ===================================================================
-- test end-to-end modification functionality
-- ===================================================================
@ -93,7 +95,7 @@ INSERT INTO append_partitioned VALUES (414123, 'AAPL', 9580, '2004-10-19 10:23:5
SET client_min_messages TO 'DEBUG2';
SET citus.task_executor_type TO 'real-time';
SELECT * FROM range_partitioned WHERE id = 32743;
DEBUG: predicate pruning for shardId 103084
DEBUG: predicate pruning for shardId 750004
DEBUG: Plan is router executable
id | symbol | bidder_id | placed_at | kind | limit_price
-------+--------+-----------+--------------------------+------+-------------
@ -101,7 +103,7 @@ DEBUG: Plan is router executable
(1 row)

SELECT * FROM append_partitioned WHERE id = 414123;
DEBUG: predicate pruning for shardId 103086
DEBUG: predicate pruning for shardId 750006
DEBUG: Plan is router executable
id | symbol | bidder_id | placed_at | kind | limit_price
--------+--------+-----------+--------------------------+------+-------------
@ -275,7 +277,7 @@ WHERE nodename = 'localhost' AND
-- Fourth: Perform the same INSERT (primary key violation)
INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
WARNING: Bad result from localhost:57638
DETAIL: Remote message: duplicate key value violates unique constraint "limit_orders_pkey_103081"
DETAIL: Remote message: duplicate key value violates unique constraint "limit_orders_pkey_750001"
-- Last: Verify the insert worked but the placement with the PK violation is now unhealthy
SELECT count(*) FROM limit_orders WHERE id = 275;
count
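The placement that raised the duplicate-key error is not rolled back; it is marked inactive in the metadata while the healthy placement keeps the row, which is what the final count verifies. A sketch of inspecting that state (shardstate 1 = finalized/healthy, 3 = inactive, per the Citus catalog conventions):

SELECT shardid, nodename, nodeport, shardstate
FROM pg_dist_shard_placement
WHERE shardstate = 3;  -- lists placements marked unhealthy, as above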
@ -3,16 +3,18 @@
--
-- This test checks that we can handle null min/max values in shard statistics
-- and that we don't partition or join prune shards that have null values.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 760000;
SET client_min_messages TO DEBUG2;
-- Change configuration to treat lineitem and orders tables as large
SET citus.large_table_shard_count TO 2;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 102009;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
shardminvalue | shardmaxvalue
---------------+---------------
1 | 2496
(1 row)

SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 102010;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
shardminvalue | shardmaxvalue
---------------+---------------
2497 | 4964
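Those bounds are exactly what partition pruning consults: a shard can be skipped only when both shardminvalue and shardmaxvalue are known and exclude the predicate's constant, which is why the NULL updates below disable pruning for the affected shard. An illustrative restatement of the candidate check for l_orderkey = 9030 (not the planner's actual implementation):

SELECT shardid FROM pg_dist_shard
WHERE logicalrelid = 'lineitem'::regclass
  AND shardminvalue IS NOT NULL AND shardmaxvalue IS NOT NULL
  AND (shardminvalue::bigint > 9030 OR shardmaxvalue::bigint < 9030);
-- the rows returned match the shards the DEBUG messages report as pruned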
@ -21,24 +23,24 @@ SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 102010;
-- Check that partition and join pruning works when min/max values exist
-- Adding l_orderkey = 1 to make the query not router executable
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
DEBUG: predicate pruning for shardId 102010
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 102013
DEBUG: predicate pruning for shardId 102014
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
9030 | 2 | 08-19-1998
9030 | 3 | 08-27-1998
9030 | 4 | 07-20-1998
9030 | 5 | 09-29-1998
9030 | 6 | 09-03-1998
1 | 1 | 03-13-1996
1 | 2 | 04-12-1996
1 | 3 | 01-29-1996
1 | 4 | 04-21-1996
1 | 5 | 03-30-1996
1 | 6 | 01-30-1996
9030 | 1 | 09-02-1998
9030 | 2 | 08-19-1998
9030 | 3 | 08-27-1998
9030 | 4 | 07-20-1998
9030 | 5 | 09-29-1998
9030 | 6 | 09-03-1998
(12 rows)

SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
@ -56,12 +58,12 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986]

-- Now set the minimum value for a shard to null. Then check that we don't apply
-- partition or join pruning for the shard with null min value.
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 102009;
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: predicate pruning for shardId 102010
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 102013
DEBUG: predicate pruning for shardId 102014
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
@ -86,11 +88,11 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986]

-- Next, set the maximum value for another shard to null. Then check that we
-- don't apply partition or join pruning for this other shard either.
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 102010;
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 102013
DEBUG: predicate pruning for shardId 102014
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
@ -114,12 +116,12 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986]

-- Last, set the minimum value to 0 and check that we don't treat it as null. We
-- should apply partition and join pruning for this shard now.
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 102009;
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
DEBUG: predicate pruning for shardId 102009
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 102013
DEBUG: predicate pruning for shardId 102014
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
9030 | 1 | 09-02-1998
@ -143,6 +145,6 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986]
|
|||
(1 row)
|
||||
|
||||
-- Set minimum and maximum values for two shards back to their original values
|
||||
UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 102009;
|
||||
UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 102010;
|
||||
UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000;
|
||||
UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001;
|
||||
SET client_min_messages TO NOTICE;
|
||||
|
|
|
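-- Note: the min/max manipulations above work because partition pruning reads
-- shard bounds straight from the pg_dist_shard catalog. A minimal sketch for
-- inspecting those bounds by hand (illustrative query, not part of the test):
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'lineitem'::regclass
ORDER BY shardid;
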
@ -3,28 +3,30 @@
--
-- Tests to verify that we correctly prune unreferenced shards. For this, we
-- need to increase the logging verbosity of messages displayed on the client.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 770000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 770000;
SET citus.explain_distributed_queries TO off;
SET client_min_messages TO DEBUG2;
-- Adding an additional l_orderkey = 1 to make this query not router executable
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
DEBUG: predicate pruning for shardId 102010
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 102013
DEBUG: predicate pruning for shardId 102014
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
 l_orderkey | l_linenumber | l_shipdate
------------+--------------+------------
 9030 | 1 | 09-02-1998
 9030 | 2 | 08-19-1998
 9030 | 3 | 08-27-1998
 9030 | 4 | 07-20-1998
 9030 | 5 | 09-29-1998
 9030 | 6 | 09-03-1998
 1 | 1 | 03-13-1996
 1 | 2 | 04-12-1996
 1 | 3 | 01-29-1996
 1 | 4 | 04-21-1996
 1 | 5 | 03-30-1996
 1 | 6 | 01-30-1996
 9030 | 1 | 09-02-1998
 9030 | 2 | 08-19-1998
 9030 | 3 | 08-27-1998
 9030 | 4 | 07-20-1998
 9030 | 5 | 09-29-1998
 9030 | 6 | 09-03-1998
(12 rows)

-- We use the l_linenumber field for the following aggregations. We need to use
@ -33,9 +35,9 @@ DEBUG: predicate pruning for shardId 102014
-- trigger the creation of toasted tables and indexes. This in turn prints
-- non-deterministic debug messages. To avoid this chain, we use l_linenumber.
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
DEBUG: predicate pruning for shardId 102009
DEBUG: predicate pruning for shardId 102010
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
 sum | avg
-------+--------------------
 17999 | 3.0189533713518953
@ -43,7 +45,7 @@ DEBUG: predicate pruning for shardId 102011
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem
WHERE (l_orderkey < 4000 OR l_orderkey > 9030);
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 290002
 sum | avg
-------+--------------------
 30184 | 3.0159872102318145
@ -51,12 +53,12 @@ DEBUG: predicate pruning for shardId 102011
-- The following query should prune out all shards and return empty results
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 20000;
DEBUG: predicate pruning for shardId 102009
DEBUG: predicate pruning for shardId 102010
DEBUG: predicate pruning for shardId 102011
DEBUG: predicate pruning for shardId 102012
DEBUG: predicate pruning for shardId 102013
DEBUG: predicate pruning for shardId 102014
DEBUG: predicate pruning for shardId 290000
DEBUG: predicate pruning for shardId 290001
DEBUG: predicate pruning for shardId 290002
DEBUG: predicate pruning for shardId 290003
DEBUG: predicate pruning for shardId 290004
DEBUG: predicate pruning for shardId 290005
 sum | avg
-----+-----
     |

@ -4,6 +4,8 @@
-- Many of the queries are taken from other regression test files
-- and converted into both plain SQL and PL/pgSQL functions, which
-- use prepared statements internally.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 780000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 780000;
CREATE FUNCTION sql_test_no_1() RETURNS bigint AS '
SELECT
count(*)

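-- The function body is cut off by the hunk above; once created, the wrappers
-- in this file are exercised simply by calling them (illustrative usage):
SELECT sql_test_no_1();
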
@ -4,6 +4,8 @@
-- Tests covering PREPARE statements. Many of the queries are
-- taken from other regression test files and converted into
-- prepared statements.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 790000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 790000;
PREPARE prepared_test_1 AS
SELECT
count(*)

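-- As with the SQL functions above, the statement body is truncated here; a
-- prepared statement of this shape is then run with EXECUTE (illustrative):
EXECUTE prepared_test_1;
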
@ -1,3 +1,5 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 800000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 800000;
-- ===================================================================
-- create test functions
-- ===================================================================
@ -47,28 +49,28 @@ SELECT master_create_worker_shards('pruning', 4, 1);
SELECT prune_using_no_values('pruning');
 prune_using_no_values
-------------------------------
 {103070,103071,103072,103073}
 {800000,800001,800002,800003}
(1 row)

-- with a single value, expect a single shard
SELECT prune_using_single_value('pruning', 'tomato');
 prune_using_single_value
--------------------------
 {103072}
 {800002}
(1 row)

-- the above is true even if that value is null
SELECT prune_using_single_value('pruning', NULL);
 prune_using_single_value
--------------------------
 {103072}
 {800002}
(1 row)

-- build an OR clause and expect more than one shard
SELECT prune_using_either_value('pruning', 'tomato', 'petunia');
 prune_using_either_value
--------------------------
 {103071,103072}
 {800001,800002}
(1 row)

-- an AND clause with incompatible values returns no shards
@ -82,7 +84,7 @@ SELECT prune_using_both_values('pruning', 'tomato', 'petunia');
SELECT prune_using_both_values('pruning', 'tomato', 'rose');
 prune_using_both_values
-------------------------
 {103072}
 {800002}
(1 row)

-- unit test of the equality expression generation code
@ -96,7 +98,7 @@ SELECT debug_equality_expression('pruning');
SELECT print_sorted_shard_intervals('pruning');
 print_sorted_shard_intervals
-------------------------------
 {103070,103071,103072,103073}
 {800000,800001,800002,800003}
(1 row)

-- update only min value for one shard
@ -104,7 +106,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi
SELECT print_sorted_shard_intervals('pruning');
 print_sorted_shard_intervals
-------------------------------
 {103070,103072,103073,103071}
 {800000,800001,800002,800003}
(1 row)

-- now let's have one more shard without min/max values
@ -112,7 +114,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi
SELECT print_sorted_shard_intervals('pruning');
 print_sorted_shard_intervals
-------------------------------
 {103070,103073,103071,103072}
 {800000,800001,800002,800003}
(1 row)

-- now let's have one more shard without min/max values
@ -120,7 +122,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi
SELECT print_sorted_shard_intervals('pruning');
 print_sorted_shard_intervals
-------------------------------
 {103073,103070,103071,103072}
 {800000,800001,800002,800003}
(1 row)

-- all shard placements are uninitialized
@ -128,7 +130,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi
SELECT print_sorted_shard_intervals('pruning');
 print_sorted_shard_intervals
-------------------------------
 {103070,103071,103072,103073}
 {800000,800001,800002,800003}
(1 row)

-- create range distributed table and observe shard pruning
@ -143,25 +145,25 @@ SELECT master_create_distributed_table('pruning_range', 'species', 'range');
SELECT master_create_empty_shard('pruning_range');
 master_create_empty_shard
---------------------------
 103074
 800004
(1 row)

SELECT master_create_empty_shard('pruning_range');
 master_create_empty_shard
---------------------------
 103075
 800005
(1 row)

SELECT master_create_empty_shard('pruning_range');
 master_create_empty_shard
---------------------------
 103076
 800006
(1 row)

SELECT master_create_empty_shard('pruning_range');
 master_create_empty_shard
---------------------------
 103077
 800007
(1 row)

-- now the comparison is done via the partition column type, which is text
@ -173,7 +175,7 @@ UPDATE pg_dist_shard SET shardminvalue = 'g', shardmaxvalue = 'h' WHERE shardid
SELECT print_sorted_shard_intervals('pruning_range');
 print_sorted_shard_intervals
-------------------------------
 {103074,103075,103076,103077}
 {800004,800005,800006,800007}
(1 row)

-- update only min value for one shard
@ -181,7 +183,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi
SELECT print_sorted_shard_intervals('pruning_range');
 print_sorted_shard_intervals
-------------------------------
 {103074,103076,103077,103075}
 {800004,800005,800006,800007}
(1 row)

-- now let's have one more shard without min/max values
@ -189,7 +191,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi
SELECT print_sorted_shard_intervals('pruning_range');
 print_sorted_shard_intervals
-------------------------------
 {103074,103077,103075,103076}
 {800004,800005,800006,800007}
(1 row)

-- now let's have one more shard without min/max values
@ -197,7 +199,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi
SELECT print_sorted_shard_intervals('pruning_range');
 print_sorted_shard_intervals
-------------------------------
 {103077,103074,103075,103076}
 {800004,800005,800006,800007}
(1 row)

-- all shard placements are uninitialized
@ -205,6 +207,6 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi
SELECT print_sorted_shard_intervals('pruning_range');
 print_sorted_shard_intervals
-------------------------------
 {103074,103075,103076,103077}
 {800004,800005,800006,800007}
(1 row)

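-- The interval orderings above can be cross-checked against the catalog:
-- shards whose min/max were nulled out sort after the initialized ones
-- (illustrative query, not part of the test):
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'pruning_range'::regclass
ORDER BY shardminvalue NULLS LAST, shardid;
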
@ -5,6 +5,8 @@
-- on the master node for final processing. When the query completes or fails,
-- the resource owner should automatically clean up these intermediate query
-- result files.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 810000;
BEGIN;
-- pg_ls_dir() displays jobids. We explicitly set the jobId sequence
-- here so that the regression output becomes independent of the

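-- A sketch of the check this test performs: listing the job cache directory
-- to see which intermediate result directories still exist (the path below
-- is the one Citus uses for job results; illustrative only):
SELECT pg_ls_dir('base/pgsql_job_cache');
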
@ -1,3 +1,5 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 820000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 820000;
-- ===================================================================
-- test shard repair functionality
-- ===================================================================

@ -1,6 +1,8 @@
--
-- MULTI_REPARTITIONED_SUBQUERY_UDF
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 830000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 830000;
-- Create UDF in master and workers
\c - - - :master_port
DROP FUNCTION IF EXISTS median(double precision[]);

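-- The test recreates this UDF on the master and on each worker; a minimal
-- sketch of a compatible definition (illustrative only, the real test file
-- supplies its own implementation):
CREATE FUNCTION median(double precision[]) RETURNS double precision
LANGUAGE sql IMMUTABLE AS $$
    SELECT percentile_cont(0.5) WITHIN GROUP (ORDER BY val)
    FROM unnest($1) val;
$$;
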
@ -1,7 +1,8 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 840000;
-- ===================================================================
-- test router planner functionality for single shard select queries
-- ===================================================================
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103300;
CREATE TABLE articles_hash (
id bigint NOT NULL,
author_id bigint NOT NULL,
@ -105,8 +106,8 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
-- first, test zero-shard SELECT, which should return an empty row
SELECT COUNT(*) FROM articles_hash WHERE author_id = 1 AND author_id = 2;
DEBUG: predicate pruning for shardId 103300
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840000
DEBUG: predicate pruning for shardId 840001
 count
-------

@ -116,7 +117,7 @@ DEBUG: predicate pruning for shardId 103301
-- test simple select for a single row
SELECT * FROM articles_hash WHERE author_id = 10 AND id = 50;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+-----------+------------
@ -126,7 +127,7 @@ DEBUG: Plan is router executable
-- get all titles by a single author
SELECT title FROM articles_hash WHERE author_id = 10;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 title
------------
@ -142,7 +143,7 @@ SELECT title, word_count FROM articles_hash
WHERE author_id = 10
ORDER BY word_count DESC NULLS LAST;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 title | word_count
------------+------------
@ -159,7 +160,7 @@ SELECT title, id FROM articles_hash
ORDER BY id
LIMIT 2;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 title | id
---------+----
@ -172,7 +173,7 @@ DEBUG: Plan is router executable
SELECT title, author_id FROM articles_hash
WHERE author_id = 7 OR author_id = 8
ORDER BY author_id ASC, id;
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
 title | author_id
-------------+-----------
 aseptic | 7
@ -190,7 +191,7 @@ DEBUG: predicate pruning for shardId 103301
-- same query is router executable with no order by
SELECT title, author_id FROM articles_hash
WHERE author_id = 7 OR author_id = 8;
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 title | author_id
-------------+-----------
@ -222,7 +223,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash
HAVING sum(word_count) > 1000
ORDER BY sum(word_count) DESC;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 author_id | corpus_size
-----------+-------------
@ -320,7 +321,7 @@ SELECT *
FROM articles_hash
WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -336,7 +337,7 @@ DEBUG: Plan is router executable
SELECT *
FROM articles_hash
WHERE author_id = 1 OR author_id = 17;
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -366,7 +367,7 @@ SELECT id as article_id, word_count * id as random_value
FROM articles_hash
WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 article_id | random_value
------------+--------------
@ -385,7 +386,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
WHERE a.author_id = 10 and a.author_id = b.author_id
LIMIT 3;
DEBUG: push down of limit count: 3
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: join prunable for intervals [-2147483648,-1] and [0,2147483647]
DEBUG: Plan is router executable
 first_author | second_word_count
@ -401,7 +402,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
WHERE a.author_id = 10 and a.author_id = b.author_id
LIMIT 3;
DEBUG: push down of limit count: 3
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
 first_author | second_word_count
--------------+-------------------
 10 | 19519
@ -415,7 +416,7 @@ SELECT *
WHERE author_id = 1
LIMIT 3;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+----------+------------
@ -431,7 +432,7 @@ SELECT *
LIMIT 2
OFFSET 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+----------+------------
@ -447,7 +448,7 @@ SELECT *
LIMIT 2
OFFSET 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -462,7 +463,7 @@ SELECT id
WHERE author_id = 1
GROUP BY id;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id
----
@ -478,7 +479,7 @@ SELECT distinct id
FROM articles_hash
WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id
----
@ -494,7 +495,7 @@ SELECT avg(word_count)
FROM articles_hash
WHERE author_id = 2;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103300
DEBUG: predicate pruning for shardId 840000
DEBUG: Plan is router executable
 avg
--------------------
@ -507,7 +508,7 @@ SELECT max(word_count) as max, min(word_count) as min,
FROM articles_hash
WHERE author_id = 2;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103300
DEBUG: predicate pruning for shardId 840000
DEBUG: Plan is router executable
 max | min | sum | cnt
-------+------+-------+-----
@ -520,7 +521,7 @@ SELECT max(word_count)
WHERE author_id = 1
GROUP BY author_id;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 max
-------
@ -566,7 +567,7 @@ SET client_min_messages to 'DEBUG2';
SELECT *
FROM articles_hash
WHERE author_id = 1 and author_id >= 1;
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -596,7 +597,7 @@ SELECT *
FROM articles_hash
WHERE author_id = 1 and (id = 1 or id = 41);
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+----------+------------
@ -609,7 +610,7 @@ SELECT *
FROM articles_hash
WHERE author_id = 1 and (id = random()::int * 0);
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+-------+------------
@ -647,7 +648,7 @@ SELECT *
FROM articles_hash
WHERE author_id = abs(-1);
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -689,7 +690,7 @@ SELECT *
FROM articles_hash
WHERE author_id = 1 and (id = abs(id - 2));
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+----------+------------
@ -714,7 +715,7 @@ SELECT *
FROM articles_hash
WHERE (author_id = 1) = true;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -730,7 +731,7 @@ SELECT *
FROM articles_hash
WHERE (author_id = 1) and id between 0 and 20;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+----------+------------
@ -743,7 +744,7 @@ SELECT *
FROM articles_hash
WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s';
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -756,7 +757,7 @@ SELECT *
FROM articles_hash
WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1);
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -769,7 +770,7 @@ SELECT *
FROM articles_hash
WHERE (title like '%s' or title like 'a%') and (author_id = 1);
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -785,7 +786,7 @@ SELECT *
FROM articles_hash
WHERE (title like '%s' or title like 'a%') and (author_id = 1) and (word_count < 3000 or word_count > 8000);
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+----------+------------
@ -799,7 +800,7 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
FROM articles_hash
WHERE author_id = 5;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 prev | title | word_count
----------+----------+------------
@ -815,7 +816,7 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
WHERE author_id = 5
ORDER BY word_count DESC;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 prev | title | word_count
----------+----------+------------
@ -830,7 +831,7 @@ SELECT id, MIN(id) over (order by word_count)
FROM articles_hash
WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | min
----+-----
@ -845,7 +846,7 @@ SELECT id, word_count, AVG(word_count) over (order by word_count)
FROM articles_hash
WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | word_count | avg
----+------------+-----------------------
@ -860,7 +861,7 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count)
FROM articles_hash
WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 word_count | rank
------------+------
@ -899,7 +900,7 @@ SELECT
WHERE
author_id = 5;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 c
---
@ -941,7 +942,7 @@ SELECT *
WHERE author_id = 1
ORDER BY id;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -961,7 +962,7 @@ DECLARE test_cursor CURSOR FOR
WHERE author_id = 1
ORDER BY id;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
FETCH test_cursor;
 id | author_id | title | word_count
@ -983,7 +984,7 @@ COPY (
WHERE author_id = 1
ORDER BY id) TO STDOUT;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
1 1 arsenous 9572
11 1 alamo 1347
@ -998,14 +999,14 @@ CREATE TEMP TABLE temp_articles_hash as
WHERE author_id = 1
ORDER BY id;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
-- router plannable queries may include filter for aggregates
SELECT count(*), count(*) FILTER (WHERE id < 3)
FROM articles_hash
WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 count | count
-------+-------
@ -1028,7 +1029,7 @@ PREPARE author_1_articles as
WHERE author_id = 1;
EXECUTE author_1_articles;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -1046,7 +1047,7 @@ PREPARE author_articles(int) as
WHERE author_id = $1;
EXECUTE author_articles(1);
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -1073,7 +1074,7 @@ DEBUG: Creating router plan
CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah
WHERE author_id = 1"
PL/pgSQL function author_articles_max_id() line 5 at SQL statement
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah
WHERE author_id = 1"
PL/pgSQL function author_articles_max_id() line 5 at SQL statement
@ -1103,7 +1104,7 @@ CONTEXT: SQL statement "SELECT ah.id, ah.word_count
FROM articles_hash ah
WHERE author_id = 1"
PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
CONTEXT: SQL statement "SELECT ah.id, ah.word_count
FROM articles_hash ah
WHERE author_id = 1"
@ -1118,7 +1119,7 @@ SET citus.task_executor_type to 'task-tracker';
SELECT id
FROM articles_hash
WHERE author_id = 1;
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
 id
----
 1
@ -1136,7 +1137,7 @@ DEBUG: Plan is router executable
SELECT id
FROM articles_hash
WHERE author_id = 1;
DEBUG: predicate pruning for shardId 103301
DEBUG: predicate pruning for shardId 840001
 id
----
 1

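-- All of the single-author queries above hit one shard because the filter
-- pins author_id to a single hash partition; EXPLAIN shows the same routing
-- decision for any candidate query (illustrative):
EXPLAIN SELECT * FROM articles_hash WHERE author_id = 10;
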
@ -2,6 +2,7 @@
-- MULTI_SHARD_MODIFY
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000;
-- Create a new hash partitioned multi_shard_modify_test table and stage data into it.
CREATE TABLE multi_shard_modify_test (
t_key integer not null,

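-- The statements this file tests run one modification across every matching
-- shard through a single UDF call; a representative invocation (illustrative,
-- assuming the table above is fully defined and distributed):
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 10');
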
@ -1,3 +1,5 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 850000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 850000;
-- ===================================================================
-- test end-to-end query functionality
-- ===================================================================
@ -323,7 +325,7 @@ SELECT *
FROM articles
WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103108
DEBUG: predicate pruning for shardId 850001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -338,7 +340,7 @@ DEBUG: Plan is router executable
SELECT *
FROM articles
WHERE author_id = 1 OR author_id = 17;
DEBUG: predicate pruning for shardId 103108
DEBUG: predicate pruning for shardId 850001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+--------------+------------
@ -367,7 +369,7 @@ SELECT id as article_id, word_count * id as random_value
FROM articles
WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103108
DEBUG: predicate pruning for shardId 850001
DEBUG: Plan is router executable
 article_id | random_value
------------+--------------
@ -385,7 +387,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
WHERE a.author_id = 10 and a.author_id = b.author_id
LIMIT 3;
DEBUG: push down of limit count: 3
DEBUG: predicate pruning for shardId 103108
DEBUG: predicate pruning for shardId 850001
DEBUG: join prunable for intervals [-2147483648,-1] and [0,2147483647]
DEBUG: Plan is router executable
 first_author | second_word_count
@ -402,7 +404,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
WHERE a.author_id = 10 and a.author_id = b.author_id
LIMIT 3;
DEBUG: push down of limit count: 3
DEBUG: predicate pruning for shardId 103108
DEBUG: predicate pruning for shardId 850001
 first_author | second_word_count
--------------+-------------------
 10 | 19519
@ -416,7 +418,7 @@ SELECT *
WHERE author_id = 1
LIMIT 2;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103108
DEBUG: predicate pruning for shardId 850001
DEBUG: Plan is router executable
 id | author_id | title | word_count
----+-----------+----------+------------
@ -432,7 +434,7 @@ SELECT id
WHERE author_id = 1
GROUP BY id;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103108
DEBUG: predicate pruning for shardId 850001
DEBUG: Plan is router executable
 id
----
@ -452,7 +454,7 @@ SELECT avg(word_count)
FROM articles
WHERE author_id = 2;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103107
DEBUG: predicate pruning for shardId 850000
DEBUG: Plan is router executable
 avg
--------------------
@ -466,7 +468,7 @@ SELECT max(word_count) as max, min(word_count) as min,
FROM articles
WHERE author_id = 2;
DEBUG: Creating router plan
DEBUG: predicate pruning for shardId 103107
DEBUG: predicate pruning for shardId 850000
DEBUG: Plan is router executable
 max | min | sum | cnt
-------+------+-------+-----
@ -477,7 +479,7 @@ DEBUG: Plan is router executable
SELECT *
FROM articles a, articles b
WHERE a.id = b.id AND a.author_id = 1;
DEBUG: predicate pruning for shardId 103108
DEBUG: predicate pruning for shardId 850001
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3

@ -2,6 +2,8 @@
-- MULTI_SINGLE_RELATION_SUBQUERY
--
-- This test checks that we are able to run a selected set of distributed SQL subqueries.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 860000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 860000;
SET citus.task_executor_type TO 'task-tracker';
select
number_sum,

@ -2,6 +2,8 @@
-- MULTI_TABLE_DDL
--
-- Tests around changing the schema and dropping of a distributed table
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 870000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 870000;
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
 master_create_distributed_table

@ -1,6 +1,8 @@
--
-- MULTI_TASK_ASSIGNMENT
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 880000;
SET citus.explain_distributed_queries TO off;
-- Check that our policies for assigning tasks to worker nodes run as expected.
-- To test this, we first create a shell table, and then manually insert shard

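-- The assignment policies referred to above are selected through a GUC; a
-- representative setting (illustrative, the test cycles through the
-- available policies):
SET citus.task_assignment_policy TO 'round-robin';
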
@ -1,6 +1,8 @@
--
-- MULTI_TPCH_QUERY1
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 890000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 890000;
-- Change configuration to treat lineitem and orders tables as large
SET citus.large_table_shard_count TO 2;
-- Query #1 from the TPC-H decision support benchmark

@ -4,6 +4,8 @@
-- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests,
-- we don't set citus.large_table_shard_count here, and instead use the default value
-- coming from postgresql.conf or multi_task_tracker_executor.conf.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 900000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 900000;
SELECT
c_custkey,
c_name,

@ -1,6 +1,8 @@
--
-- MULTI_TPCH_QUERY12
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 910000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 910000;
-- Change configuration to treat lineitem and orders tables as large
SET citus.large_table_shard_count TO 2;
-- Query #12 from the TPC-H decision support benchmark

@ -1,6 +1,8 @@
--
-- MULTI_TPCH_QUERY14
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 920000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 920000;
-- Change configuration to treat lineitem and orders tables as large
SET citus.large_table_shard_count TO 2;
-- Query #14 from the TPC-H decision support benchmark

@ -1,6 +1,8 @@
--
-- MULTI_TPCH_QUERY19
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 930000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 930000;
-- Change configuration to treat lineitem and orders tables as large
SET citus.large_table_shard_count TO 2;
-- Query #19 from the TPC-H decision support benchmark. Note that we modified

@ -4,6 +4,8 @@
-- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests,
-- we don't set citus.large_table_shard_count here, and instead use the default value
-- coming from postgresql.conf or multi_task_tracker_executor.conf.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 940000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 940000;
SELECT
l_orderkey,
sum(l_extendedprice * (1 - l_discount)) as revenue,

@ -1,6 +1,8 @@
--
-- MULTI_TPCH_QUERY6
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 950000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 950000;
-- Change configuration to treat lineitem and orders tables as large
SET citus.large_table_shard_count TO 2;
-- Query #6 from the TPC-H decision support benchmark

@ -1,6 +1,8 @@
--
-- MULTI_TPCH_QUERY7
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 970000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 970000;
-- Change configuration to treat lineitem AND orders tables as large
SET citus.large_table_shard_count TO 2;
-- Query #7 from the TPC-H decision support benchmark

@ -1,6 +1,8 @@
--
-- MULTI_TPCH_QUERY7_NESTED
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 960000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 960000;
-- Change configuration to treat lineitem AND orders tables AS large
SET citus.large_table_shard_count TO 2;
-- Query #7 from the TPC-H benchmark; modified to include sub-selects

@ -1,6 +1,8 @@
-- this test file aims to test UPSERT feature on Citus
-- note that output of this file for postgresql 9.4 will
-- be full of syntax errors, which is expected.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 980000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 980000;
CREATE TABLE upsert_test
(
part_key int UNIQUE,

@ -1,6 +1,8 @@
-- this test file aims to test UPSERT feature on Citus
-- note that output of this file for postgresql 9.4 will
-- be full of syntax errors, which is expected.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 980000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 980000;
CREATE TABLE upsert_test
(
part_key int UNIQUE,

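-- A minimal sketch of the UPSERT shape these two files exercise (PostgreSQL
-- 9.5+ syntax; the column name other_col is illustrative):
INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1)
ON CONFLICT (part_key) DO UPDATE SET other_col = EXCLUDED.other_col;
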
@ -1,3 +1,5 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 990000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 990000;
-- ===================================================================
-- test utility statement functionality
-- ===================================================================

@ -4,6 +4,8 @@
-- Check that we can run utility statements with embedded SELECT statements on
-- distributed tables. Currently we only support CREATE TABLE AS (SELECT..),
-- DECLARE CURSOR, and COPY ... TO statements.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1000000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1000000;
CREATE TEMP TABLE lineitem_pricing_summary AS
(
SELECT

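-- Alongside CREATE TABLE AS, the COPY form mentioned above looks like this
-- (illustrative; any SELECT over the distributed table works the same way):
COPY (SELECT l_orderkey, l_quantity FROM lineitem LIMIT 5) TO STDOUT;
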
@ -3,6 +3,8 @@
--
-- Tests to check if we inform the user about potential caveats of creating new
-- databases, schemas, and roles.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1010000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1010000;
CREATE DATABASE new_database;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers

@ -3,6 +3,8 @@
--
-- This test checks that we simply emit an error message instead of trying to
-- fetch and join a shard which has an alias set.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1020000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1020000;
-- Show that the join works without an alias
SELECT COUNT(*) FROM lineitem, part WHERE l_partkey = p_partkey;
 count
@ -11,10 +13,10 @@ SELECT COUNT(*) FROM lineitem, part WHERE l_partkey = p_partkey;
(1 row)

-- Assign an alias to the parts shard
UPDATE pg_dist_shard SET shardalias = 'my_alias' WHERE shardid = 102019;
UPDATE pg_dist_shard SET shardalias = 'my_alias' WHERE shardid = 290000;
-- Attempt a join which uses this shard
SELECT COUNT(*) FROM lineitem, part WHERE l_partkey = p_partkey;
ERROR: cannot fetch shard 102019
ERROR: cannot fetch shard 290000
DETAIL: Fetching shards with aliases is currently unsupported
-- Remove the alias from the parts shard
UPDATE pg_dist_shard SET shardalias = NULL WHERE shardid = 102019;
UPDATE pg_dist_shard SET shardalias = NULL WHERE shardid = 290000;

@ -3,6 +3,8 @@
--
-- This test checks that we simply emit an error message instead of trying to
-- process an unsupported distributed SQL subquery.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1030000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1030000;
SELECT * FROM lineitem WHERE l_orderkey IN
(SELECT l_orderkey FROM lineitem WHERE l_quantity > 0);
ERROR: cannot perform distributed planning on this query

@ -5,6 +5,8 @@
-- projection order are called working (resjunk) columns. We check here that
-- these columns are pulled to the master, and are correctly used in sorting and
-- grouping.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1040000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1040000;
SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20;
 l_quantity
------------

@ -1,6 +1,8 @@
--
-- TASK_TRACKER_ASSIGN_TASK
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1050000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1050000;
\set JobId 401010
\set SimpleTaskId 101101
\set RecoverableTaskId 801102

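-- These psql variables feed the task-tracker protocol calls that follow; a
-- representative assignment has this shape (illustrative; the real test
-- supplies its own task queries, and the exact UDF signature is assumed here):
SELECT task_tracker_assign_task(:JobId, :SimpleTaskId, 'SELECT 1');
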
@ -1,6 +1,8 @@
--
-- TASK_TRACKER_CLEANUP_JOB
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1060000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1060000;
\set JobId 401010
\set CompletedTaskId 801107
\set RunningTaskId 801108

@ -1,6 +1,8 @@
--
-- TASK_TRACKER_CREATE_TABLE
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1070000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1070000;
-- New table definitions to test the task tracker process and protocol
CREATE TABLE lineitem_simple_task ( LIKE lineitem );
CREATE TABLE lineitem_compute_task ( LIKE lineitem );

@ -1,6 +1,8 @@
--
-- TASK_TRACKER_PARTITION_TASK
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1080000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1080000;
\set JobId 401010
\set PartitionTaskId 801106
\set PartitionColumn l_orderkey

@ -1,6 +1,8 @@
--
-- WORKER_BINARY_DATA_PARTITION
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1090000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1090000;
\set JobId 201010
\set TaskId 101105
\set Partition_Column textcolumn

@ -1,6 +1,8 @@
--
-- WORKER_CHECK_INVALID_ARGUMENTS
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1100000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1100000;
\set JobId 201010
\set TaskId 101108
\set Table_Name simple_binary_data_table

@ -4,6 +4,8 @@
-- Create new table definitions for lineitem and supplier tables to test worker
-- node execution logic. For now, the tests include range and hash partitioning
-- of existing tables.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1110000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1110000;
CREATE TABLE lineitem (
l_orderkey bigint not null,
l_partkey integer not null,

@ -1,6 +1,8 @@
--
-- WORKER_HASH_PARTITION
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1130000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1130000;
\set JobId 201010
\set TaskId 101103
\set Partition_Column l_orderkey

@ -1,6 +1,8 @@
--
-- WORKER_HASH_PARTITION_COMPLEX
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1120000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1120000;
\set JobId 201010
\set TaskId 101104
\set Partition_Column l_partkey

@ -1,6 +1,8 @@
--
-- WORKER_MERGE_HASH_FILES
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1140000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1140000;
\set JobId 201010
\set TaskId 101103
\set Task_Table_Name public.task_101103

@ -1,6 +1,8 @@
--
-- WORKER_MERGE_RANGE_FILES
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1150000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1150000;
\set JobId 201010
\set TaskId 101101
\set Task_Table_Name public.task_101101

@ -1,6 +1,8 @@
--
-- WORKER_NULL_DATA_PARTITION
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1180000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1180000;
\set JobId 201010
\set Range_TaskId 101106
\set Partition_Column s_nationkey

@ -1,6 +1,8 @@
--
-- WORKER_RANGE_PARTITION
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1160000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1160000;
\set JobId 201010
\set TaskId 101101
\set Partition_Column l_orderkey

@ -1,6 +1,8 @@
--
-- WORKER_RANGE_PARTITION_COMPLEX
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1170000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1170000;
\set JobId 201010
\set TaskId 101102
\set Partition_Column l_partkey

@ -2,8 +2,12 @@
-- MULTI_AGG_DISTINCT
--

-- Create a new range partitioned lineitem table and stage data into it

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000;


-- Create a new range partitioned lineitem table and stage data into it
CREATE TABLE lineitem_range (
l_orderkey bigint not null,
l_partkey integer not null,

@ -2,8 +2,12 @@
-- MULTI_AGG_TYPE_CONVERSION
--

-- Test aggregate type conversions using sums of integers and division operator

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 210000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 210000;


-- Test aggregate type conversions using sums of integers and division operator
SELECT sum(l_suppkey) FROM lineitem;
SELECT sum(l_suppkey) / 2 FROM lineitem;
SELECT sum(l_suppkey) / 2::numeric FROM lineitem;

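-- For reference: sum() over an integer column returns bigint, so dividing by
-- the integer literal 2 truncates, while casting the divisor to numeric
-- preserves the fraction (self-contained illustration):
SELECT 7::bigint / 2 AS integer_division, 7::bigint / 2::numeric AS numeric_division;
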
@ -2,11 +2,14 @@
-- MULTI_ALTER_TABLE_STATEMENTS
--


ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 220000;


-- Check that we can run ALTER TABLE statements on distributed tables.
-- We set the shardid sequence here so that the shardids in this test
-- aren't affected by changes to the previous tests.

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103000;
CREATE TABLE lineitem_alter (
l_orderkey bigint not null,
l_partkey integer not null,
@ -178,7 +181,7 @@ ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int;

-- verify newly added column is not present in a worker shard
\c - - - :worker_1_port
SELECT column_only_added_to_master FROM lineitem_alter_103000 LIMIT 0;
SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0;
\c - - - :master_port

-- ddl propagation flag is reset to default, disable it again

@ -1,6 +1,12 @@
--
-- MULTI_APPEND_TABLE_TO_SHARD
--


ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 230000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 230000;


-- Initialize tables to join
CREATE TABLE multi_append_table_to_shard_right
(

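-- The UDF under test appends a regular table's rows to an existing shard; a
-- representative call shape (illustrative; the arguments and the left-hand
-- table name are assumptions, not copied from the test):
SELECT master_append_table_to_shard(230000, 'multi_append_table_to_shard_left', 'localhost', 5432);
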
@ -2,6 +2,11 @@
-- COMPLEX_COUNT_DISTINCT
--


ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 240000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 240000;


CREATE TABLE lineitem_hash (
l_orderkey bigint not null,
l_partkey integer not null,

@ -2,7 +2,10 @@
-- MULTI_COPY
--


ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 560000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 560000;


-- Create a new hash-partitioned table into which to COPY
CREATE TABLE customer_copy_hash (

@ -1,3 +1,8 @@

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 250000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 250000;


CREATE SCHEMA tpch
CREATE TABLE nation (
n_nationkey integer not null,

@ -6,7 +6,10 @@
-- differs from previous tests in that it modifies the *internal* shardId
-- generator, forcing the distributed database to use 64-bit shard identifiers.


ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000;


-- Stage additional data to start using large shard identifiers.

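-- A quick sanity check that the generator is now past the 32-bit range
-- (illustrative; note that nextval consumes one identifier):
SELECT nextval('pg_catalog.pg_dist_shardid_seq') > (2::numeric ^ 32);
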
@ -2,6 +2,11 @@
-- MULTI_FDW_STAGE_DATA
--


ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 330000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 330000;


-- Tests for staging foreign data in a distributed cluster.

\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data'

@ -6,7 +6,10 @@
-- differs from previous tests in that it modifies the *internal* shardId
-- generator, forcing the distributed database to use 64-bit shard identifiers.


ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000;


-- Stage additional data to start using large shard identifiers.

@ -2,8 +2,12 @@
-- MULTI_MASTER_DELETE_PROTOCOL
--

-- Create a new range partitioned customer_delete_protocol table and stage data into it.

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 320000;


-- Create a new range partitioned customer_delete_protocol table and stage data into it.
CREATE TABLE customer_delete_protocol (
c_custkey integer not null,
c_name varchar(25) not null,

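-- Range-partitioned append tables are trimmed through a delete UDF rather
-- than plain DELETE; a representative call (the predicate is illustrative):
SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 1000');
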
@ -1,3 +1,8 @@

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 310000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 310000;


SET citus.large_table_shard_count TO 2;
SET citus.log_multi_join_order to true;
SET client_min_messages TO LOG;

@ -1,12 +1,16 @@
--
-- MULTI_STAGE_DATA
--
-- Tests for staging data in a distributed cluster. Please note that the number
-- of shards uploaded depends on two config values: citus.shard_replication_factor and
-- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement
-- policy is left to the default value (round-robin) to test the common install case.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000;
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
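As the comment says, how many shards a \STAGE creates is driven by citus.shard_replication_factor and citus.shard_max_size, which pg_regress_multi.pl sets for the whole suite. When reproducing by hand, the same GUCs can be set per session; a sketch with illustrative values:

SET citus.shard_replication_factor TO 2; -- placements created per shard
SET citus.shard_max_size TO '256kB';     -- staging rolls over to a new shard past this size
SHOW citus.shard_max_size;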
@ -1,11 +1,15 @@
--
-- MULTI_STAGE_LARGE_RECORDS
--
-- Tests for staging data with large records (i.e. greater than the read buffer
-- size, which is 32kB) in a distributed cluster. These tests make sure that we
-- are creating shards of correct size even when records are large.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 300000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 300000;
SET citus.shard_max_size TO "256kB";
CREATE TABLE large_records_table (data_id integer, data text);
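A single oversized row is enough to cross the 32kB read-buffer boundary the comment describes. A minimal sketch against the large_records_table defined above (payload size illustrative, not from the test data):

-- One record whose text column alone exceeds the 32kB read buffer.
INSERT INTO large_records_table
SELECT 1, repeat('x', 40000); -- roughly 40kB in a single row
SELECT data_id, length(data) FROM large_records_table;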
@ -2,6 +2,11 @@
-- MULTI_STAGE_MORE_DATA
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
-- We stage more data to customer and part tables to test distributed joins. The
-- staging causes the planner to consider customer and part tables as large, and
-- evaluate plans where some of the underlying tables need to be repartitioned.
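Once customer and part are treated as large tables, repartition joins enter the plan space. The join order the planner settles on can be surfaced with the logging GUCs that appear earlier in this diff; the query below is illustrative, not taken from the test:

SET citus.log_multi_join_order TO true;
SET client_min_messages TO LOG;
-- The LOG line names the join method chosen for each pair of tables.
SELECT count(*) FROM lineitem, part WHERE l_partkey = p_partkey;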
@ -2,8 +2,12 @@
-- MULTI_SUBQUERY
--
-- Create tables for subquery tests
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 270000;
-- Create tables for subquery tests
CREATE TABLE lineitem_subquery (
l_orderkey bigint not null,
l_partkey integer not null,
@ -101,7 +105,7 @@ FROM
-- Update metadata in order to make all shards equal.
UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 102024;
UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 270003;
-- If group by is not on partition column then we error out.
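After the shardmaxvalue update above, the range boundaries can be read back from pg_dist_shard. A hypothetical verification query, not part of the test:

SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_subquery'::regclass
ORDER BY shardid;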
@ -350,5 +354,3 @@ SELECT * FROM
AS foo;
DROP TABLE subquery_pruning_varchar_test_table;
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102026;
@ -2,6 +2,11 @@
-- WORKER_COPY
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 260000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 260000;
COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' WITH DELIMITER '|';
COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' WITH DELIMITER '|';
@ -1,6 +1,8 @@
--
-- MULTI_AGG_DISTINCT
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000;
-- Create a new range partitioned lineitem table and stage data into it
CREATE TABLE lineitem_range (
l_orderkey bigint not null,
@ -1,6 +1,8 @@
--
-- MULTI_AGG_TYPE_CONVERSION
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 210000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 210000;
-- Test aggregate type conversions using sums of integers and division operator
SELECT sum(l_suppkey) FROM lineitem;
sum
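The conversion being exercised rests on sum(integer) returning bigint, so division stays integral unless a cast intervenes; a self-contained sketch in plain PostgreSQL:

SELECT sum(x) / 2          AS bigint_division, -- truncates to 3
       sum(x)::numeric / 2 AS numeric_division -- keeps 3.5
FROM (VALUES (1), (2), (4)) AS t(x);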
@ -1,10 +1,11 @@
--
-- MULTI_ALTER_TABLE_STATEMENTS
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 220000;
-- Check that we can run ALTER TABLE statements on distributed tables.
-- We set the shardid sequence here so that the shardids in this test
-- aren't affected by changes to the previous tests.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103000;
CREATE TABLE lineitem_alter (
l_orderkey bigint not null,
l_partkey integer not null,
@ -166,10 +167,10 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;
\STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
ERROR: null value in column "int_column2" violates not-null constraint
DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
CONTEXT: COPY lineitem_alter_103006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..."
CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..."
ERROR: null value in column "int_column2" violates not-null constraint
DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
CONTEXT: COPY lineitem_alter_103006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..."
CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..."
\stage: failed to replicate shard to enough replicas
-- Verify that DROP NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
@ -261,7 +262,7 @@ NOTICE: relation "non_existent_table" does not exist, skipping
ALTER TABLE IF EXISTS lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE INTEGER;
ALTER TABLE lineitem_alter DROP COLUMN non_existent_column;
WARNING: Bad result from localhost:57638
DETAIL: Remote message: column "non_existent_column" of relation "lineitem_alter_103000" does not exist
DETAIL: Remote message: column "non_existent_column" of relation "lineitem_alter_220000" does not exist
ERROR: could not execute DDL command on worker node shards
ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS non_existent_column;
NOTICE: column "non_existent_column" of relation "lineitem_alter" does not exist, skipping
@ -469,9 +470,9 @@ ALTER TABLE lineitem_renamed RENAME TO lineitem_alter;
ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int;
-- verify newly added column is not present in a worker shard
\c - - - :worker_1_port
SELECT column_only_added_to_master FROM lineitem_alter_103000 LIMIT 0;
SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0;
ERROR: column "column_only_added_to_master" does not exist
LINE 1: SELECT column_only_added_to_master FROM lineitem_alter_10300...
LINE 1: SELECT column_only_added_to_master FROM lineitem_alter_22000...
^
\c - - - :master_port
-- ddl propagation flag is reset to default, disable it again
@ -1,6 +1,8 @@
--
-- MULTI_APPEND_TABLE_TO_SHARD
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 230000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 230000;
-- Initialize tables to join
CREATE TABLE multi_append_table_to_shard_right
(
@ -131,7 +133,7 @@ SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage'
FROM
pg_dist_shard
WHERE 'multi_append_table_to_shard_right_hash'::regclass::oid = logicalrelid;
ERROR: cannot append to shardId 103011
ERROR: cannot append to shardId 230000
DETAIL: We currently don't support appending to shards in hash-partitioned tables
-- Clean up after test
SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_right');
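For contrast with the error above, master_append_table_to_shard succeeds when the target shard belongs to an append-distributed table. A sketch of the supported call, assuming the staging table from this test and the :master_port psql variable used elsewhere in the suite:

SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage',
                                    'localhost', :master_port)
FROM pg_dist_shard
WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;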