Replace verb 'stage' with 'load' in test comments

"Staging table" will be the only valid use of 'stage' from now on, we
will now say "load" when talking about data ingestion. If creation of
shards is its own step, we'll just say "shard creation".
pull/630/head
Jason Petersen 2016-08-22 13:24:18 -06:00
parent 35e9f51348
commit b391abda3d
No known key found for this signature in database
GPG Key ID: 9F1D3510D110ABA9
25 changed files with 33 additions and 33 deletions

View File

@@ -5,7 +5,7 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
 -- Create a table partitioned on integer column and update partition type to
--- hash. Then stage data to this table and update shard min max values with
+-- hash. Then load data into this table and update shard min max values with
 -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
 -- 1134484726, -28094569 and -1011077333.
 CREATE TABLE orders_hash_partitioned (

View File

@@ -5,7 +5,7 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
 -- Create a table partitioned on integer column and update partition type to
--- hash. Then stage data to this table and update shard min max values with
+-- hash. Then load data into this table and update shard min max values with
 -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
 -- 1134484726, -28094569 and -1011077333.
 CREATE TABLE orders_hash_partitioned (

View File

@@ -9,7 +9,7 @@ SET citus.log_multi_join_order TO TRUE;
 SET client_min_messages TO LOG;
 -- Change configuration to treat lineitem, orders, customer, and part tables as
 -- large. The following queries are basically the same as the ones in tpch_small
--- except that more data has been staged to customer and part tables. Therefore,
+-- except that more data has been loaded into customer and part tables. Therefore,
 -- we will apply different distributed join strategies for these queries.
 SET citus.large_table_shard_count TO 2;
 -- Query #6 from the TPC-H decision support benchmark

View File

@@ -3,7 +3,7 @@
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000;
--- Create a new hash partitioned multi_shard_modify_test table and stage data into it.
+-- Create a new hash partitioned multi_shard_modify_test table and load data into it.
 CREATE TABLE multi_shard_modify_test (
 t_key integer not null,
 t_name varchar(25) not null,

View File

@@ -7,7 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000;
--- Create a new range partitioned lineitem table and stage data into it
+-- Create a new range partitioned lineitem table and load data into it
 CREATE TABLE lineitem_range (
 l_orderkey bigint not null,
 l_partkey integer not null,

View File

@@ -14,7 +14,7 @@ SELECT sum(l_suppkey) / 2::numeric FROM lineitem;
 SELECT sum(l_suppkey)::int8 / 2 FROM lineitem;
--- Create a new table to test type conversions on different types, and stage
+-- Create a new table to test type conversions on different types, and load
 -- data into this table. Then, apply aggregate functions and divide / multiply
 -- the results to test type conversions.

View File

@@ -111,7 +111,7 @@ SELECT master_create_empty_shard('multi_append_table_to_shard_date');
 SELECT * FROM multi_append_table_to_shard_date;
--- Stage an empty table and check that we can query the distributed table
+-- Create an empty distributed table and check that we can query it
 CREATE TABLE multi_append_table_to_shard_stage (LIKE multi_append_table_to_shard_date);
 SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
@@ -120,7 +120,7 @@ WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid;
 SELECT * FROM multi_append_table_to_shard_date;
--- Stage NULL values and check that we can query the table
+-- INSERT NULL values and check that we can query the table
 INSERT INTO multi_append_table_to_shard_stage VALUES (NULL, NULL);
 SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
@@ -129,7 +129,7 @@ WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid;
 SELECT * FROM multi_append_table_to_shard_date;
--- Stage regular values and check that we can query the table
+-- INSERT regular values and check that we can query the table
 INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-01-01', 3);
 SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM

View File

@@ -2,7 +2,7 @@
 -- MULTI_LARGE_SHARDID
 --
--- Stage data to distributed tables, and run TPC-H query #1 and #6. This test
+-- Load data into distributed tables, and run TPC-H query #1 and #6. This test
 -- differs from previous tests in that it modifies the *internal* shardId
 -- generator, forcing the distributed database to use 64-bit shard identifiers.
@@ -11,7 +11,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000;
--- Stage additional data to start using large shard identifiers.
+-- Load additional data to start using large shard identifiers.
 \copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
 \copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'

View File

@@ -7,7 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 320000;
--- Create a new range partitioned customer_delete_protocol table and stage data into it.
+-- Create a new range partitioned customer_delete_protocol table and load data into it.
 CREATE TABLE customer_delete_protocol (
 c_custkey integer not null,
 c_name varchar(25) not null,

View File

@@ -1,7 +1,7 @@
 --
 -- MULTI_STAGE_DATA
 --
--- Tests for staging data in a distributed cluster. Please note that the number
+-- Tests for loading data in a distributed cluster. Please note that the number
 -- of shards uploaded depends on two config values: citus.shard_replication_factor and
 -- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement
 -- policy is left to the default value (round-robin) to test the common install case.

View File

@@ -1,7 +1,7 @@
 --
 -- MULTI_STAGE_LARGE_RECORDS
 --
--- Tests for staging data with large records (i.e. greater than the read buffer
+-- Tests for loading data with large records (i.e. greater than the read buffer
 -- size, which is 32kB) in a distributed cluster. These tests make sure that we
 -- are creating shards of correct size even when records are large.

View File

@@ -7,8 +7,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
--- We stage more data to customer and part tables to test distributed joins. The
--- staging causes the planner to consider customer and part tables as large, and
+-- We load more data to customer and part tables to test distributed joins. The
+-- loading causes the planner to consider customer and part tables as large, and
 -- evaluate plans where some of the underlying tables need to be repartitioned.
 \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'

View File

@@ -77,7 +77,7 @@ FROM
 GROUP BY
 l_orderkey) AS unit_prices;
--- Stage data to tables.
+-- Load data into tables.
 SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
 \gset

View File

@@ -3,7 +3,7 @@
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000;
--- Create a new range partitioned lineitem table and stage data into it
+-- Create a new range partitioned lineitem table and load data into it
 CREATE TABLE lineitem_range (
 l_orderkey bigint not null,
 l_partkey integer not null,

View File

@@ -28,7 +28,7 @@ SELECT sum(l_suppkey)::int8 / 2 FROM lineitem;
 30308988
 (1 row)
--- Create a new table to test type conversions on different types, and stage
+-- Create a new table to test type conversions on different types, and load
 -- data into this table. Then, apply aggregate functions and divide / multiply
 -- the results to test type conversions.
 CREATE TABLE aggregate_type (

View File

@@ -175,7 +175,7 @@ SELECT * FROM multi_append_table_to_shard_date;
 ------------+-------
 (0 rows)
--- Stage an empty table and check that we can query the distributed table
+-- Create an empty distributed table and check that we can query it
 CREATE TABLE multi_append_table_to_shard_stage (LIKE multi_append_table_to_shard_date);
 SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
@@ -191,7 +191,7 @@ SELECT * FROM multi_append_table_to_shard_date;
 ------------+-------
 (0 rows)
--- Stage NULL values and check that we can query the table
+-- INSERT NULL values and check that we can query the table
 INSERT INTO multi_append_table_to_shard_stage VALUES (NULL, NULL);
 SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
@@ -208,7 +208,7 @@ SELECT * FROM multi_append_table_to_shard_date;
 |
 (1 row)
--- Stage regular values and check that we can query the table
+-- INSERT regular values and check that we can query the table
 INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-01-01', 3);
 SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM

View File

@@ -1,12 +1,12 @@
 --
 -- MULTI_LARGE_SHARDID
 --
--- Stage data to distributed tables, and run TPC-H query #1 and #6. This test
+-- Load data into distributed tables, and run TPC-H query #1 and #6. This test
 -- differs from previous tests in that it modifies the *internal* shardId
 -- generator, forcing the distributed database to use 64-bit shard identifiers.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000;
--- Stage additional data to start using large shard identifiers.
+-- Load additional data to start using large shard identifiers.
 \copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
 \copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
 -- Query #1 from the TPC-H decision support benchmark.

View File

@@ -3,7 +3,7 @@
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 320000;
--- Create a new range partitioned customer_delete_protocol table and stage data into it.
+-- Create a new range partitioned customer_delete_protocol table and load data into it.
 CREATE TABLE customer_delete_protocol (
 c_custkey integer not null,
 c_name varchar(25) not null,

View File

@@ -1,7 +1,7 @@
 --
 -- MULTI_STAGE_DATA
 --
--- Tests for staging data in a distributed cluster. Please note that the number
+-- Tests for loading data in a distributed cluster. Please note that the number
 -- of shards uploaded depends on two config values: citus.shard_replication_factor and
 -- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement
 -- policy is left to the default value (round-robin) to test the common install case.

View File

@@ -1,7 +1,7 @@
 --
 -- MULTI_STAGE_LARGE_RECORDS
 --
--- Tests for staging data with large records (i.e. greater than the read buffer
+-- Tests for loading data with large records (i.e. greater than the read buffer
 -- size, which is 32kB) in a distributed cluster. These tests make sure that we
 -- are creating shards of correct size even when records are large.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 300000;

View File

@@ -3,8 +3,8 @@
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000;
--- We stage more data to customer and part tables to test distributed joins. The
--- staging causes the planner to consider customer and part tables as large, and
+-- We load more data to customer and part tables to test distributed joins. The
+-- loading causes the planner to consider customer and part tables as large, and
 -- evaluate plans where some of the underlying tables need to be repartitioned.
 \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
 \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'

View File

@@ -82,7 +82,7 @@ FROM
 (1 row)
--- Stage data to tables.
+-- Load data into tables.
 SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986

View File

@@ -10,7 +10,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
 -- Create a table partitioned on integer column and update partition type to
--- hash. Then stage data to this table and update shard min max values with
+-- hash. Then load data into this table and update shard min max values with
 -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
 -- 1134484726, -28094569 and -1011077333.

View File

@@ -15,7 +15,7 @@ SET client_min_messages TO LOG;
 -- Change configuration to treat lineitem, orders, customer, and part tables as
 -- large. The following queries are basically the same as the ones in tpch_small
--- except that more data has been staged to customer and part tables. Therefore,
+-- except that more data has been loaded into customer and part tables. Therefore,
 -- we will apply different distributed join strategies for these queries.
 SET citus.large_table_shard_count TO 2;

View File

@@ -7,7 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000;
 ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000;
--- Create a new hash partitioned multi_shard_modify_test table and stage data into it.
+-- Create a new hash partitioned multi_shard_modify_test table and load data into it.
 CREATE TABLE multi_shard_modify_test (
 t_key integer not null,
 t_name varchar(25) not null,