Tests are updated to use create_distributed_table

pull/2143/head
mehmet furkan şahin 2018-05-02 13:29:57 +03:00
parent ae97df43be
commit 785a86ed0a
87 changed files with 2007 additions and 1081 deletions

View File

@ -117,6 +117,8 @@ SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM linei
-- Check that approximate count(distinct) works at a table in a schema other than public
-- create necessary objects
SET citus.next_shard_id TO 20000000;
SET citus.next_placement_id TO 20000000;
CREATE SCHEMA test_count_distinct_schema;
CREATE TABLE test_count_distinct_schema.nation_hash(
n_nationkey integer not null,
@ -124,15 +126,9 @@ CREATE TABLE test_count_distinct_schema.nation_hash(
n_regionkey integer not null,
n_comment varchar(152)
);
SELECT master_create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -1,9 +1,11 @@
SET citus.next_shard_id TO 1601000;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
CREATE TABLE tab9 (test_id integer NOT NULL, data int);
CREATE TABLE tab10 (test_id integer NOT NULL, data int);
SELECT master_create_distributed_table('tab9', 'test_id', 'hash');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('tab9', 'test_id', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -13,12 +15,6 @@ SELECT master_create_distributed_table('tab10', 'test_id', 'hash');
(1 row)
SELECT master_create_worker_shards('tab9', 1, 1);
master_create_worker_shards
-----------------------------
(1 row)
TRUNCATE tab9;
UPDATE pg_dist_shard SET logicalrelid = 'tab10'::regclass WHERE logicalrelid = 'tab9'::regclass;
TRUNCATE tab10;

View File

@ -375,16 +375,11 @@ SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') O
(2 rows)
-- make sure run_on_all_placements respects shardstate
SET citus.shard_count TO 5;
CREATE TABLE check_placements (key int);
SELECT master_create_distributed_table('check_placements', 'key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('check_placements', 5, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('check_placements', 'key', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -420,28 +415,17 @@ SELECT * FROM run_command_on_placements('check_placements', 'select 1');
DROP TABLE check_placements CASCADE;
-- make sure run_on_all_colocated_placements correctly detects colocation
CREATE TABLE check_colocated (key int);
SELECT master_create_distributed_table('check_colocated', 'key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('check_colocated', 5, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('check_colocated', 'key', 'hash');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE second_table (key int);
SELECT master_create_distributed_table('second_table', 'key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('second_table', 4, 2);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 4;
SELECT create_distributed_table('second_table', 'key', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -526,16 +510,11 @@ SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_tab
DROP TABLE check_colocated CASCADE;
DROP TABLE second_table CASCADE;
-- runs on all shards
SET citus.shard_count TO 5;
CREATE TABLE check_shards (key int);
SELECT master_create_distributed_table('check_shards', 'key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('check_shards', 5, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('check_shards', 'key', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -76,6 +76,8 @@ SELECT master_get_active_worker_nodes();
(1 row)
-- add some shard placements to the cluster
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
SELECT isactive FROM master_activate_node('localhost', :worker_2_port);
isactive
----------
@ -83,15 +85,9 @@ SELECT isactive FROM master_activate_node('localhost', :worker_2_port);
(1 row)
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT master_create_distributed_table('cluster_management_test', 'col_1', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('cluster_management_test', 16, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -373,6 +369,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
(1 row)
-- check that a distributed table can be created after adding a node in a transaction
SET citus.shard_count TO 4;
SELECT master_remove_node('localhost', :worker_2_port);
master_remove_node
--------------------

View File

@ -45,15 +45,11 @@ CREATE TABLE insert_target (
);
-- squelch WARNINGs that contain worker_port
SET client_min_messages TO ERROR;
SELECT master_create_distributed_table('insert_target', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('insert_target', 2, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('insert_target', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -41,28 +41,28 @@ CREATE TABLE table_to_distribute (
);
-- use the table WITH (OIDS) set
ALTER TABLE table_to_distribute SET WITH OIDS;
SELECT master_create_distributed_table('table_to_distribute', 'id', 'hash');
SELECT create_distributed_table('table_to_distribute', 'id', 'hash');
ERROR: cannot distribute relation: table_to_distribute
DETAIL: Distributed relations must not specify the WITH (OIDS) option in their definitions.
-- revert WITH (OIDS) from above
ALTER TABLE table_to_distribute SET WITHOUT OIDS;
-- use an index instead of table name
SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
ERROR: table_to_distribute_pkey is not a regular, foreign or partitioned table
-- use a bad column name
SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash');
SELECT create_distributed_table('table_to_distribute', 'bad_column', 'hash');
ERROR: column "bad_column" of relation "table_to_distribute" does not exist
-- use unrecognized partition type
SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized');
SELECT create_distributed_table('table_to_distribute', 'name', 'unrecognized');
ERROR: invalid input value for enum citus.distribution_type: "unrecognized"
LINE 1: ..._distributed_table('table_to_distribute', 'name', 'unrecogni...
^
-- use a partition column of a type lacking any default operator class
SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash');
SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash');
ERROR: data type json has no default operator class for specified partition method
DETAIL: Partition column types must have a default operator class defined.
-- use a partition column of type lacking the required support function (hash)
SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
ERROR: could not identify a hash function for type dummy_type
DETAIL: Partition column types must have a hash function defined to use hash partitioning.
-- distribute table and inspect side effects
@ -162,16 +162,12 @@ CREATE FOREIGN TABLE foreign_table_to_distribute
id bigint
)
SERVER fake_fdw_server;
SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1);
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
master_create_worker_shards
-----------------------------
create_distributed_table
--------------------------
(1 row)
@ -204,15 +200,10 @@ CREATE TABLE weird_shard_count
name text,
id bigint
);
SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('weird_shard_count', 7, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 7;
SELECT create_distributed_table('weird_shard_count', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -25,12 +25,12 @@ CREATE TABLE lineitem (
l_shipmode char(10) not null,
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');
WARNING: table "lineitem" has a UNIQUE or EXCLUDE constraint
DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
HINT: Consider using hash partitioning.
master_create_distributed_table
---------------------------------
create_distributed_table
--------------------------
(1 row)
@ -46,12 +46,12 @@ CREATE TABLE orders (
o_shippriority integer not null,
o_comment varchar(79) not null,
PRIMARY KEY(o_orderkey) );
SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
SELECT create_distributed_table('orders', 'o_orderkey', 'append');
WARNING: table "orders" has a UNIQUE or EXCLUDE constraint
DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
HINT: Consider using hash partitioning.
master_create_distributed_table
---------------------------------
create_distributed_table
--------------------------
(1 row)
@ -96,9 +96,9 @@ CREATE TABLE customer_append (
c_acctbal decimal(15,2) not null,
c_mktsegment char(10) not null,
c_comment varchar(117) not null);
SELECT master_create_distributed_table('customer_append', 'c_custkey', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('customer_append', 'c_custkey', 'append');
create_distributed_table
--------------------------
(1 row)
@ -139,9 +139,9 @@ CREATE TABLE part_append (
p_container char(10) not null,
p_retailprice decimal(15,2) not null,
p_comment varchar(23) not null);
SELECT master_create_distributed_table('part_append', 'p_partkey', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('part_append', 'p_partkey', 'append');
create_distributed_table
--------------------------
(1 row)
@ -173,9 +173,9 @@ CREATE TABLE supplier_single_shard
s_acctbal decimal(15,2) not null,
s_comment varchar(101) not null
);
SELECT master_create_distributed_table('supplier_single_shard', 's_suppkey', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('supplier_single_shard', 's_suppkey', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -8,12 +8,12 @@ CREATE TABLE uniq_cns_append_tables
partition_col integer UNIQUE,
other_col integer
);
SELECT master_create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append');
SELECT create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append');
WARNING: table "uniq_cns_append_tables" has a UNIQUE or EXCLUDE constraint
DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
HINT: Consider using hash partitioning.
master_create_distributed_table
---------------------------------
create_distributed_table
--------------------------
(1 row)
@ -23,12 +23,12 @@ CREATE TABLE excl_cns_append_tables
other_col integer,
EXCLUDE (partition_col WITH =)
);
SELECT master_create_distributed_table('excl_cns_append_tables', 'partition_col', 'append');
SELECT create_distributed_table('excl_cns_append_tables', 'partition_col', 'append');
WARNING: table "excl_cns_append_tables" has a UNIQUE or EXCLUDE constraint
DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
HINT: Consider using hash partitioning.
master_create_distributed_table
---------------------------------
create_distributed_table
--------------------------
(1 row)
@ -39,7 +39,7 @@ CREATE TABLE pk_on_non_part_col
partition_col integer,
other_col integer PRIMARY KEY
);
SELECT master_create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash');
SELECT create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash');
ERROR: cannot create constraint on "pk_on_non_part_col"
DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE).
CREATE TABLE uq_on_non_part_col
@ -47,7 +47,7 @@ CREATE TABLE uq_on_non_part_col
partition_col integer,
other_col integer UNIQUE
);
SELECT master_create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash');
SELECT create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash');
ERROR: cannot create constraint on "uq_on_non_part_col"
DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE).
CREATE TABLE ex_on_non_part_col
@ -56,7 +56,7 @@ CREATE TABLE ex_on_non_part_col
other_col integer,
EXCLUDE (other_col WITH =)
);
SELECT master_create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash');
SELECT create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash');
ERROR: cannot create constraint on "ex_on_non_part_col"
DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE).
-- now show that Citus can distribute unique and EXCLUDE constraints that
@ -69,9 +69,9 @@ CREATE TABLE pk_on_part_col
partition_col integer PRIMARY KEY,
other_col integer
);
SELECT master_create_distributed_table('pk_on_part_col', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('pk_on_part_col', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -80,9 +80,9 @@ CREATE TABLE uq_part_col
partition_col integer UNIQUE,
other_col integer
);
SELECT master_create_distributed_table('uq_part_col', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('uq_part_col', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -92,21 +92,15 @@ CREATE TABLE uq_two_columns
other_col integer,
UNIQUE (partition_col, other_col)
);
SELECT master_create_distributed_table('uq_two_columns', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('uq_two_columns', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('uq_two_columns', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1);
INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1);
ERROR: duplicate key value violates unique constraint "uq_two_columns_partition_col_other_col_key_365000"
ERROR: duplicate key value violates unique constraint "uq_two_columns_partition_col_other_col_key_365008"
DETAIL: Key (partition_col, other_col)=(1, 1) already exists.
CONTEXT: while executing command on localhost:57637
CREATE TABLE ex_on_part_col
@ -115,44 +109,32 @@ CREATE TABLE ex_on_part_col
other_col integer,
EXCLUDE (partition_col WITH =)
);
SELECT master_create_distributed_table('ex_on_part_col', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('ex_on_part_col', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('ex_on_part_col', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,2);
ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_partition_col_excl_365004"
ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_partition_col_excl_365012"
DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1).
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
CREATE TABLE ex_on_two_columns
(
partition_col integer,
other_col integer,
EXCLUDE (partition_col WITH =, other_col WITH =)
);
SELECT master_create_distributed_table('ex_on_two_columns', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('ex_on_two_columns', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('ex_on_two_columns', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1);
ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_partition_col_other_col_excl_365008"
ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_partition_col_other_col_excl_365016"
DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1).
CONTEXT: while executing command on localhost:57637
CREATE TABLE ex_on_two_columns_prt
@ -161,15 +143,9 @@ CREATE TABLE ex_on_two_columns_prt
other_col integer,
EXCLUDE (partition_col WITH =, other_col WITH =) WHERE (other_col > 100)
);
SELECT master_create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('ex_on_two_columns_prt', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -177,16 +153,16 @@ INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101);
INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101);
ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_prt_partition_col_other_col_excl_365012"
ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_prt_partition_col_other_col_excl_365020"
DETAIL: Key (partition_col, other_col)=(1, 101) conflicts with existing key (partition_col, other_col)=(1, 101).
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
CREATE TABLE ex_wrong_operator
(
partition_col tsrange,
other_col tsrange,
EXCLUDE USING gist (other_col WITH =, partition_col WITH &&)
);
SELECT master_create_distributed_table('ex_wrong_operator', 'partition_col', 'hash');
SELECT create_distributed_table('ex_wrong_operator', 'partition_col', 'hash');
ERROR: cannot create constraint on "ex_wrong_operator"
DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE).
CREATE TABLE ex_overlaps
@ -195,21 +171,15 @@ CREATE TABLE ex_overlaps
other_col tsrange,
EXCLUDE USING gist (other_col WITH &&, partition_col WITH =)
);
SELECT master_create_distributed_table('ex_overlaps', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('ex_overlaps', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('ex_overlaps', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]');
INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]');
ERROR: conflicting key value violates exclusion constraint "ex_overlaps_other_col_partition_col_excl_365019"
ERROR: conflicting key value violates exclusion constraint "ex_overlaps_other_col_partition_col_excl_365027"
DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]).
CONTEXT: while executing command on localhost:57638
-- now show that Citus can distribute unique and EXCLUDE constraints that
@ -222,9 +192,9 @@ CREATE TABLE pk_on_part_col_named
partition_col integer CONSTRAINT pk_on_part_col_named_pk PRIMARY KEY,
other_col integer
);
SELECT master_create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -233,9 +203,9 @@ CREATE TABLE uq_part_col_named
partition_col integer CONSTRAINT uq_part_col_named_uniq UNIQUE,
other_col integer
);
SELECT master_create_distributed_table('uq_part_col_named', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('uq_part_col_named', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -245,21 +215,15 @@ CREATE TABLE uq_two_columns_named
other_col integer,
CONSTRAINT uq_two_columns_named_uniq UNIQUE (partition_col, other_col)
);
SELECT master_create_distributed_table('uq_two_columns_named', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('uq_two_columns_named', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('uq_two_columns_named', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1);
INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1);
ERROR: duplicate key value violates unique constraint "uq_two_columns_named_uniq_365020"
ERROR: duplicate key value violates unique constraint "uq_two_columns_named_uniq_365036"
DETAIL: Key (partition_col, other_col)=(1, 1) already exists.
CONTEXT: while executing command on localhost:57637
CREATE TABLE ex_on_part_col_named
@ -268,44 +232,32 @@ CREATE TABLE ex_on_part_col_named
other_col integer,
CONSTRAINT ex_on_part_col_named_exclude EXCLUDE (partition_col WITH =)
);
SELECT master_create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('ex_on_part_col_named', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,2);
ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_named_exclude_365024"
ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_named_exclude_365040"
DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1).
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
CREATE TABLE ex_on_two_columns_named
(
partition_col integer,
other_col integer,
CONSTRAINT ex_on_two_columns_named_exclude EXCLUDE (partition_col WITH =, other_col WITH =)
);
SELECT master_create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('ex_on_two_columns_named', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1);
ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_named_exclude_365028"
ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_named_exclude_365044"
DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1).
CONTEXT: while executing command on localhost:57637
CREATE TABLE ex_multiple_excludes
@ -316,34 +268,28 @@ CREATE TABLE ex_multiple_excludes
CONSTRAINT ex_multiple_excludes_excl1 EXCLUDE (partition_col WITH =, other_col WITH =),
CONSTRAINT ex_multiple_excludes_excl2 EXCLUDE (partition_col WITH =, other_other_col WITH =)
);
SELECT master_create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('ex_multiple_excludes', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,1);
INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,2);
ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl1_365032"
ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl1_365048"
DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1).
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,2,1);
ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl2_365032"
ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl2_365048"
DETAIL: Key (partition_col, other_other_col)=(1, 1) conflicts with existing key (partition_col, other_other_col)=(1, 1).
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
CREATE TABLE ex_wrong_operator_named
(
partition_col tsrange,
other_col tsrange,
CONSTRAINT ex_wrong_operator_named_exclude EXCLUDE USING gist (other_col WITH =, partition_col WITH &&)
);
SELECT master_create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash');
SELECT create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash');
ERROR: cannot create constraint on "ex_wrong_operator_named"
DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE).
CREATE TABLE ex_overlaps_named
@ -352,32 +298,26 @@ CREATE TABLE ex_overlaps_named
other_col tsrange,
CONSTRAINT ex_overlaps_operator_named_exclude EXCLUDE USING gist (other_col WITH &&, partition_col WITH =)
);
SELECT master_create_distributed_table('ex_overlaps_named', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('ex_overlaps_named', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('ex_overlaps_named', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]');
INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]');
ERROR: conflicting key value violates exclusion constraint "ex_overlaps_operator_named_exclude_365039"
ERROR: conflicting key value violates exclusion constraint "ex_overlaps_operator_named_exclude_365055"
DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]).
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
-- now show that Citus allows unique constraints on range-partitioned tables.
CREATE TABLE uq_range_tables
(
partition_col integer UNIQUE,
other_col integer
);
SELECT master_create_distributed_table('uq_range_tables', 'partition_col', 'range');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('uq_range_tables', 'partition_col', 'range');
create_distributed_table
--------------------------
(1 row)
@ -388,27 +328,21 @@ CREATE TABLE check_example
other_col integer CHECK (other_col >= 100),
other_other_col integer CHECK (abs(other_other_col) >= 100)
);
SELECT master_create_distributed_table('check_example', 'partition_col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('check_example', '2', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('check_example', 'partition_col', 'hash');
create_distributed_table
--------------------------
(1 row)
\c - - - :worker_1_port
\d check_example_partition_col_key_365040
Index "public.check_example_partition_col_key_365040"
\d check_example_partition_col_key_365056
Index "public.check_example_partition_col_key_365056"
Column | Type | Definition
---------------+---------+---------------
partition_col | integer | partition_col
unique, btree, for table "public.check_example_365040"
unique, btree, for table "public.check_example_365056"
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365040'::regclass;
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365056'::regclass;
Constraint | Definition
-------------------------------------+-------------------------------------
check_example_other_col_check | CHECK (other_col >= 100)

View File

@ -25,24 +25,24 @@ SHOW citus.multi_task_query_log_level;
off
(1 row)
SELECT * FROM multi_task_table;
SELECT * FROM multi_task_table ORDER BY 1;
id | name
----+--------
1 | elem_1
3 | elem_3
2 | elem_2
3 | elem_3
(3 rows)
-- Get messages with the log level 'notice'
SET citus.multi_task_query_log_level TO notice;
SELECT * FROM multi_task_table;
SELECT * FROM multi_task_table ORDER BY 1;
NOTICE: multi-task query about to be executed
HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers.
id | name
----+--------
1 | elem_1
3 | elem_3
2 | elem_2
3 | elem_3
(3 rows)
SELECT AVG(id) AS avg_id FROM multi_task_table;
@ -101,13 +101,13 @@ HINT: Queries are split to multiple tasks if they have to be split into several
INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table WHERE id = 1 GROUP BY id;
-- Should have four rows (three rows from the query without where and the one from with where)
SET citus.multi_task_query_log_level to DEFAULT;
SELECT * FROM summary_table;
SELECT * FROM summary_table ORDER BY 1,2;
id | order_sum
----+-----------
1 | 35
1 | 35
3 | 50
2 | 40
3 | 50
(4 rows)
-- Set log-level to different levels inside the transaction
@ -125,13 +125,13 @@ HINT: Queries are split to multiple tasks if they have to be split into several
ROLLBACK;
-- Should have only four rows since the transaction is rollbacked.
SET citus.multi_task_query_log_level to DEFAULT;
SELECT * FROM summary_table;
SELECT * FROM summary_table ORDER BY 1,2;
id | order_sum
----+-----------
1 | 35
1 | 35
3 | 50
2 | 40
3 | 50
(4 rows)
-- Test router-select query

View File

@ -51,15 +51,10 @@ CREATE TABLE composite_type_partitioned_table
id integer,
col test_composite_type
);
SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -88,15 +83,9 @@ CREATE TABLE bugs (
id integer,
status bug_status
);
SELECT master_create_distributed_table('bugs', 'status', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('bugs', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('bugs', 'status', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -128,15 +117,9 @@ CREATE TABLE varchar_hash_partitioned_table
id int,
name varchar
);
SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -2,6 +2,7 @@
-- MULTI_DEPARSE_SHARD_QUERY
--
SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;
CREATE FUNCTION deparse_shard_query_test(text)
RETURNS VOID
AS 'citus'
@ -18,15 +19,9 @@ CREATE TABLE raw_events_1
value_7 int,
event_at date DEfAULT now()
);
SELECT master_create_distributed_table('raw_events_1', 'tenant_id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('raw_events_1', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('raw_events_1', 'tenant_id', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -42,15 +37,9 @@ CREATE TABLE raw_events_2
value_7 int,
event_at date DEfAULT now()
);
SELECT master_create_distributed_table('raw_events_2', 'tenant_id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('raw_events_2', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('raw_events_2', 'tenant_id', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -63,15 +52,9 @@ CREATE TABLE aggregated_events
sum_value_5 float,
average_value_6 int,
rollup_hour date);
SELECT master_create_distributed_table('aggregated_events', 'tenant_id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('aggregated_events', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('aggregated_events', 'tenant_id', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -46,16 +46,9 @@ CREATE TABLE events_hash (
id bigint,
name text
);
SELECT master_create_distributed_table('events_hash', 'name', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
-- create worker shards
SELECT master_create_worker_shards('events_hash', 4, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('events_hash', 'name', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -4,9 +4,9 @@
-- Tests around dropping and recreating the extension
SET citus.next_shard_id TO 550000;
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
create_distributed_table
--------------------------
(1 row)
@ -33,9 +33,9 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-- verify that a table can be created after the extension has been dropped and recreated
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -3,15 +3,14 @@
--
-- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
-- tables.
SET citus.next_shard_id TO 640000;
--
-- CREATE TEST TABLES
--
SET citus.next_shard_id TO 102080;
CREATE TABLE index_test_range(a int, b int, c int);
SELECT master_create_distributed_table('index_test_range', 'a', 'range');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('index_test_range', 'a', 'range');
create_distributed_table
--------------------------
(1 row)
@ -27,23 +26,19 @@ SELECT master_create_empty_shard('index_test_range');
102081
(1 row)
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 2;
CREATE TABLE index_test_hash(a int, b int, c int);
SELECT master_create_distributed_table('index_test_hash', 'a', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('index_test_hash', 8, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('index_test_hash', 'a', 'hash');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE index_test_append(a int, b int, c int);
SELECT master_create_distributed_table('index_test_append', 'a', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('index_test_append', 'a', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -3,15 +3,14 @@
--
-- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
-- tables.
SET citus.next_shard_id TO 640000;
--
-- CREATE TEST TABLES
--
SET citus.next_shard_id TO 102080;
CREATE TABLE index_test_range(a int, b int, c int);
SELECT master_create_distributed_table('index_test_range', 'a', 'range');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('index_test_range', 'a', 'range');
create_distributed_table
--------------------------
(1 row)
@ -27,23 +26,19 @@ SELECT master_create_empty_shard('index_test_range');
102081
(1 row)
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 2;
CREATE TABLE index_test_hash(a int, b int, c int);
SELECT master_create_distributed_table('index_test_hash', 'a', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('index_test_hash', 8, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('index_test_hash', 'a', 'hash');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE index_test_append(a int, b int, c int);
SELECT master_create_distributed_table('index_test_append', 'a', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('index_test_append', 'a', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -75,19 +75,19 @@ WHERE
raw_events_first.user_id = raw_events_second.user_id;
user_id
---------
1
5
3
4
6
1
5
2
6
(6 rows)
-- see that we get unique vialitons
INSERT INTO raw_events_second SELECT * FROM raw_events_first;
ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_13300004"
DETAIL: Key (user_id, value_1)=(1, 10) already exists.
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
-- stable functions should be allowed
INSERT INTO raw_events_second (user_id, time)
SELECT
@ -236,7 +236,7 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS
DEBUG: Plan is router executable
ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_13300007"
DETAIL: Key (user_id, value_1)=(9, 90) already exists.
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
-- now do some aggregations
INSERT INTO agg_events
SELECT
@ -266,7 +266,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_t
DEBUG: Plan is router executable
ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008"
DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists.
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
-- some subquery tests
INSERT INTO agg_events
(value_1_agg,
@ -287,7 +287,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_t
DEBUG: Plan is router executable
ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008"
DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists.
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
-- subquery one more level depth
INSERT INTO agg_events
(value_4_agg,
@ -311,7 +311,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_t
DEBUG: Plan is router executable
ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008"
DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists.
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
-- join between subqueries
INSERT INTO agg_events
(user_id)
@ -1722,7 +1722,7 @@ BEGIN;
ALTER TABLE reference_table ADD COLUMN z int;
INSERT INTO raw_events_first (user_id)
SELECT user_id FROM raw_events_second JOIN reference_table USING (user_id);
ERROR: cannot establish a new connection for placement 13300024, since DDL has been executed on a connection that is in use
ERROR: cannot establish a new connection for placement 13300025, since DDL has been executed on a connection that is in use
ROLLBACK;
-- Insert after copy is allowed
BEGIN;
@ -2210,14 +2210,14 @@ TRUNCATE raw_events_first;
BEGIN;
INSERT INTO raw_events_first (user_id, value_1)
SELECT s, s FROM generate_series(1, 5) s;
SELECT user_id, value_1 FROM raw_events_first;
SELECT user_id, value_1 FROM raw_events_first ORDER BY 1;
user_id | value_1
---------+---------
1 | 1
5 | 5
2 | 2
3 | 3
4 | 4
2 | 2
5 | 5
(5 rows)
ROLLBACK;

View File

@ -20,52 +20,33 @@ CREATE TABLE multiple_hash (
CREATE TABLE insufficient_shards ( LIKE limit_orders );
CREATE TABLE range_partitioned ( LIKE limit_orders );
CREATE TABLE append_partitioned ( LIKE limit_orders );
SELECT master_create_distributed_table('limit_orders', 'id', 'hash');
master_create_distributed_table
---------------------------------
SET citus.shard_count TO 2;
SELECT create_distributed_table('limit_orders', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)
SELECT master_create_distributed_table('multiple_hash', 'category', 'hash');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('multiple_hash', 'id', 'hash');
ERROR: column "id" of relation "multiple_hash" does not exist
SELECT create_distributed_table('range_partitioned', 'id', 'range');
create_distributed_table
--------------------------
(1 row)
SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_distributed_table('range_partitioned', 'id', 'range');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_distributed_table('append_partitioned', 'id', 'append');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('limit_orders', 2, 2);
master_create_worker_shards
-----------------------------
(1 row)
SELECT master_create_worker_shards('multiple_hash', 2, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('append_partitioned', 'id', 'append');
create_distributed_table
--------------------------
(1 row)
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
-- make a single shard that covers no partition values
SELECT master_create_worker_shards('insufficient_shards', 1, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('insufficient_shards', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -177,21 +158,21 @@ INSERT INTO limit_orders VALUES (18811, 'BUD', 14962, '2014-04-05 08:32:16', 'se
-5.00);
ERROR: new row for relation "limit_orders_750000" violates check constraint "limit_orders_limit_price_check"
DETAIL: Failing row contains (18811, BUD, 14962, 2014-04-05 08:32:16, sell, -5.00).
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
-- INSERT violating primary key constraint
INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58);
ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001"
DETAIL: Key (id)=(32743) already exists.
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
-- INSERT violating primary key constraint, with RETURNING specified.
INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING *;
ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001"
DETAIL: Key (id)=(32743) already exists.
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
-- INSERT, with RETURNING specified, failing with a non-constraint error
INSERT INTO limit_orders VALUES (34153, 'LEE', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING id / 0;
ERROR: division by zero
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
SET client_min_messages TO DEFAULT;
-- commands with non-constant partition values are supported
INSERT INTO limit_orders VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:45',
@ -301,7 +282,7 @@ DELETE FROM limit_orders WHERE id < 0;
WITH new_orders AS (INSERT INTO limit_orders VALUES (412, 'FLO', 12, '2017-07-02 16:32:15', 'buy', 66))
DELETE FROM limit_orders RETURNING id / 0;
ERROR: division by zero
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
SELECT * FROM limit_orders WHERE id = 412;
id | symbol | bidder_id | placed_at | kind | limit_price
----+--------+-----------+-----------+------+-------------
@ -358,7 +339,7 @@ INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell',
INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001"
DETAIL: Key (id)=(275) already exists.
CONTEXT: while executing command on localhost:57638
CONTEXT: while executing command on localhost:57637
-- Test that shards which miss a modification are marked unhealthy
-- First: Connect to the second worker node
\c - - - :worker_2_port
@ -437,10 +418,9 @@ UPDATE limit_orders SET limit_price = 0.00 FROM bidders
limit_orders.bidder_id = bidders.id AND
bidders.name = 'Bernie Madoff';
ERROR: relation bidders is not distributed
-- the connection used for the INSERT is claimed by pull-push, causing the UPDATE to fail
-- should succeed with a CTE
WITH deleted_orders AS (INSERT INTO limit_orders VALUES (399, 'PDR', 14, '2017-07-02 16:32:15', 'sell', 43))
UPDATE limit_orders SET symbol = 'GM';
ERROR: cannot establish a new connection for placement 750003, since DML has been executed on a connection that is in use
SELECT symbol, bidder_id FROM limit_orders WHERE id = 246;
symbol | bidder_id
--------+-----------
@ -620,15 +600,10 @@ SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data;
-- verify interaction of default values, SERIAL, and RETURNING
\set QUIET on
CREATE TABLE app_analytics_events (id serial, app_id integer, name text);
SELECT master_create_distributed_table('app_analytics_events', 'app_id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('app_analytics_events', 4, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 4;
SELECT create_distributed_table('app_analytics_events', 'app_id', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -784,7 +759,7 @@ SELECT * FROM app_analytics_events ORDER BY id;
DROP TABLE app_analytics_events;
-- Test multi-row insert with a dropped column before the partition column
CREATE TABLE app_analytics_events (id int default 3, app_id integer, name text);
SELECT create_distributed_table('app_analytics_events', 'name');
SELECT create_distributed_table('app_analytics_events', 'name', colocate_with => 'none');
create_distributed_table
--------------------------

File diff suppressed because it is too large Load Diff

View File

@ -466,28 +466,28 @@ FROM pg_dist_partition NATURAL JOIN shard_counts
ORDER BY colocationid, logicalrelid;
logicalrelid | colocationid | shard_count | partmethod | repmodel
--------------------------------------------------------+--------------+-------------+------------+----------
citus_mx_test_schema_join_1.nation_hash | 2 | 4 | h | s
citus_mx_test_schema_join_1.nation_hash_2 | 2 | 4 | h | s
citus_mx_test_schema_join_2.nation_hash | 2 | 4 | h | s
citus_mx_test_schema.nation_hash_collation_search_path | 2 | 4 | h | s
citus_mx_test_schema.nation_hash_composite_types | 2 | 4 | h | s
mx_ddl_table | 2 | 4 | h | s
app_analytics_events_mx | 2 | 4 | h | s
company_employees_mx | 2 | 4 | h | s
nation_hash | 3 | 16 | h | s
citus_mx_test_schema.nation_hash | 3 | 16 | h | s
lineitem_mx | 4 | 16 | h | s
orders_mx | 4 | 16 | h | s
customer_mx | 5 | 1 | n | t
nation_mx | 5 | 1 | n | t
part_mx | 5 | 1 | n | t
supplier_mx | 5 | 1 | n | t
limit_orders_mx | 6 | 2 | h | s
articles_hash_mx | 6 | 2 | h | s
multiple_hash_mx | 7 | 2 | h | s
researchers_mx | 8 | 2 | h | s
labs_mx | 9 | 1 | h | s
objects_mx | 9 | 1 | h | s
articles_single_shard_hash_mx | 9 | 1 | h | s
citus_mx_test_schema_join_1.nation_hash | 3 | 4 | h | s
citus_mx_test_schema_join_1.nation_hash_2 | 3 | 4 | h | s
citus_mx_test_schema_join_2.nation_hash | 3 | 4 | h | s
citus_mx_test_schema.nation_hash_collation_search_path | 3 | 4 | h | s
citus_mx_test_schema.nation_hash_composite_types | 3 | 4 | h | s
mx_ddl_table | 3 | 4 | h | s
app_analytics_events_mx | 3 | 4 | h | s
company_employees_mx | 3 | 4 | h | s
nation_hash | 4 | 16 | h | s
citus_mx_test_schema.nation_hash | 4 | 16 | h | s
lineitem_mx | 5 | 16 | h | s
orders_mx | 5 | 16 | h | s
customer_mx | 6 | 1 | n | t
nation_mx | 6 | 1 | n | t
part_mx | 6 | 1 | n | t
supplier_mx | 6 | 1 | n | t
limit_orders_mx | 7 | 2 | h | s
articles_hash_mx | 7 | 2 | h | s
multiple_hash_mx | 8 | 2 | h | s
researchers_mx | 9 | 2 | h | s
labs_mx | 10 | 1 | h | s
objects_mx | 10 | 1 | h | s
articles_single_shard_hash_mx | 10 | 1 | h | s
(23 rows)

View File

@ -3,6 +3,7 @@
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000;
SET citus.multi_shard_commit_protocol = '2pc';
SET citus.shard_count TO 2;
-- Verify that a table name > 56 characters gets hashed properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
@ -29,6 +30,8 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345
(2 rows)
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
-- Verify that the UDF works and rejects bad arguments.
SELECT shard_name(NULL, 666666);
shard_name
@ -59,15 +62,9 @@ CREATE TABLE name_lengths (
col2 integer not null,
constraint constraint_a UNIQUE (col1)
);
SELECT master_create_distributed_table('name_lengths', 'col1', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('name_lengths', '2', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('name_lengths', 'col1', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -165,6 +162,8 @@ Index "public.tmp_idx_123456789012345678901234567890123456789_5e470afa_225003"
btree, for table "public.name_lengths_225003"
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
-- Verify that distributed tables with too-long names
-- for CHECK constraints are no trouble.
CREATE TABLE sneaky_name_lengths (
@ -173,15 +172,9 @@ CREATE TABLE sneaky_name_lengths (
int_col_12345678901234567890123456789012345678901234567890 integer not null,
CHECK (int_col_12345678901234567890123456789012345678901234567890 > 100)
);
SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -231,6 +224,8 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_n
(1 row)
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
DROP TABLE sneaky_name_lengths CASCADE;
-- verify that named constraint with too-long name gets hashed properly
CREATE TABLE sneaky_name_lengths (
@ -239,15 +234,9 @@ CREATE TABLE sneaky_name_lengths (
int_col_12345678901234567890123456789012345678901234567890 integer not null,
constraint unique_12345678901234567890123456789012345678901234567890 UNIQUE (col1)
);
SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -260,21 +249,17 @@ SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
(1 row)
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
DROP TABLE sneaky_name_lengths CASCADE;
-- Verify that much larger shardIds are handled properly
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 2250000000000;
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
col2 integer not null);
SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -288,20 +273,16 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345
(2 rows)
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE;
-- Verify that multi-byte boundaries are respected for databases with UTF8 encoding.
CREATE TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D" UESCAPE '!' (
col1 integer not null PRIMARY KEY,
col2 integer not null);
SELECT master_create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', '2', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -332,20 +313,16 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0
(2 rows)
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
-- Verify that shard_name UDF supports schemas
CREATE SCHEMA multi_name_lengths;
CREATE TABLE multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
col2 integer not null);
SELECT master_create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 2, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -57,9 +57,9 @@ CREATE TABLE varchar_partitioned_table
(
varchar_column varchar(100)
);
SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
create_distributed_table
--------------------------
(1 row)
@ -85,9 +85,9 @@ CREATE TABLE array_partitioned_table
(
array_column text[]
);
SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append');
create_distributed_table
--------------------------
(1 row)
@ -121,9 +121,9 @@ CREATE TABLE composite_partitioned_table
(
composite_column composite_type
);
SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -280,15 +280,10 @@ CREATE TABLE plpgsql_table (
key int,
value int
);
SELECT master_create_distributed_table('plpgsql_table','key','hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('plpgsql_table',4,1);
master_create_worker_shards
-----------------------------
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('plpgsql_table','key','hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -255,15 +255,10 @@ CREATE TABLE router_executor_table (
comment varchar(20),
stats test_composite_type
);
SELECT master_create_distributed_table('router_executor_table', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('router_executor_table', 2, 2);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 2;
SELECT create_distributed_table('router_executor_table', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -344,15 +339,11 @@ CREATE TABLE prepare_table (
key int,
value int
);
SELECT master_create_distributed_table('prepare_table','key','hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('prepare_table',4,1);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('prepare_table','key','hash');
create_distributed_table
--------------------------
(1 row)
@ -1049,15 +1040,11 @@ CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IM
\c - - - :master_port
-- test table
CREATE TABLE test_table (test_id integer NOT NULL, data text);
SELECT master_create_distributed_table('test_table', 'test_id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('test_table', 2, 2);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('test_table', 'test_id', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -31,16 +31,10 @@ CREATE FUNCTION print_sorted_shard_intervals(regclass)
-- ===================================================================
-- create distributed table observe shard pruning
CREATE TABLE pruning ( species text, last_pruned date, plant_id integer );
SELECT master_create_distributed_table('pruning', 'species', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
-- create worker shards
SELECT master_create_worker_shards('pruning', 4, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('pruning', 'species', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -134,9 +128,9 @@ SELECT print_sorted_shard_intervals('pruning');
-- create range distributed table observe shard pruning
CREATE TABLE pruning_range ( species text, last_pruned date, plant_id integer );
SELECT master_create_distributed_table('pruning_range', 'species', 'range');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('pruning_range', 'species', 'range');
create_distributed_table
--------------------------
(1 row)

View File

@ -11,16 +11,12 @@ CREATE INDEX ON customer_engagements (id);
CREATE INDEX ON customer_engagements (created_at);
CREATE INDEX ON customer_engagements (event_data);
-- distribute the table
SELECT master_create_distributed_table('customer_engagements', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
-- create a single shard on the first worker
SELECT master_create_worker_shards('customer_engagements', 1, 2);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('customer_engagements', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -105,17 +101,13 @@ CREATE FOREIGN TABLE remote_engagements (
event_data text
) SERVER fake_fdw_server;
-- distribute the table
SELECT master_create_distributed_table('remote_engagements', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
-- create a single shard on the first worker
SELECT master_create_worker_shards('remote_engagements', 1, 2);
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('remote_engagements', 'id', 'hash');
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
master_create_worker_shards
-----------------------------
create_distributed_table
--------------------------
(1 row)

View File

@ -127,27 +127,18 @@ FUNCTION 1 test_udt_hash(test_udt);
-- Connect to master
\c - - - :master_port
-- Distribute and populate the two tables.
SELECT master_create_distributed_table('repartition_udt', 'pk', 'hash');
master_create_distributed_table
---------------------------------
SET citus.shard_count TO 3;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('repartition_udt', 'pk', 'hash');
create_distributed_table
--------------------------
(1 row)
SELECT master_create_worker_shards('repartition_udt', 3, 1);
master_create_worker_shards
-----------------------------
(1 row)
SELECT master_create_distributed_table('repartition_udt_other', 'pk', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('repartition_udt_other', 5, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 5;
SELECT create_distributed_table('repartition_udt_other', 'pk', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -394,7 +394,7 @@ ORDER BY logicalrelid;
logicalrelid | partmethod | colocationid | repmodel
-----------------------------------------+------------+--------------+----------
replicate_reference_table_reference_one | n | 1370002 | t
replicate_reference_table_hash | h | 1360005 | c
replicate_reference_table_hash | h | 1360004 | c
(2 rows)
BEGIN;

View File

@ -7,15 +7,9 @@ CREATE TABLE multi_shard_modify_test (
t_key integer not null,
t_name varchar(25) not null,
t_value integer not null);
SELECT master_create_distributed_table('multi_shard_modify_test', 't_key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('multi_shard_modify_test', 4, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('multi_shard_modify_test', 't_key', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -71,15 +65,9 @@ CREATE TABLE temp_nations(name text, key integer);
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' ');
ERROR: relation temp_nations is not distributed
-- commands with a USING clause are unsupported
SELECT master_create_distributed_table('temp_nations', 'name', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('temp_nations', 4, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('temp_nations', 'name', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -81,15 +81,10 @@ CREATE TABLE temp_table (
key int,
value int
);
SELECT master_create_distributed_table('temp_table','key','hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('temp_table',4,1);
master_create_worker_shards
-----------------------------
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('temp_table','key','hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -794,15 +794,10 @@ CREATE TABLE subquery_pruning_varchar_test_table
a varchar,
b int
);
SELECT master_create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('subquery_pruning_varchar_test_table', 4, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -137,15 +137,10 @@ CREATE TABLE researchers (
lab_id int NOT NULL,
name text NOT NULL
);
SELECT master_create_distributed_table('researchers', 'lab_id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('researchers', 2, 2);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 2;
SELECT create_distributed_table('researchers', 'lab_id', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -137,15 +137,10 @@ CREATE TABLE researchers (
lab_id int NOT NULL,
name text NOT NULL
);
SELECT master_create_distributed_table('researchers', 'lab_id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('researchers', 2, 2);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 2;
SELECT create_distributed_table('researchers', 'lab_id', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -4,9 +4,9 @@
-- Tests around changing the schema and dropping of a distributed table
SET citus.next_shard_id TO 870000;
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
create_distributed_table
--------------------------
(1 row)
@ -29,9 +29,9 @@ COMMIT;
\set VERBOSITY default
-- recreate testtableddl
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
create_distributed_table
--------------------------
(1 row)
@ -41,9 +41,9 @@ DROP TABLE testtableddl;
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-- create table and do create empty shard test here, too
SET citus.shard_replication_factor TO 1;
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
create_distributed_table
--------------------------
(1 row)
@ -90,15 +90,11 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-- create a table with a SERIAL column
CREATE TABLE testserialtable(id serial, group_id integer);
SELECT master_create_distributed_table('testserialtable', 'group_id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('testserialtable', 2, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('testserialtable', 'group_id', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -16,9 +16,9 @@ SET citus.explain_distributed_queries TO off;
-- and shard placement data into system catalogs. We next run Explain command,
-- and check that tasks are assigned to worker nodes as expected.
CREATE TABLE task_assignment_test_table (test_id integer);
SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -16,9 +16,9 @@ SET citus.explain_distributed_queries TO off;
-- and shard placement data into system catalogs. We next run Explain command,
-- and check that tasks are assigned to worker nodes as expected.
CREATE TABLE task_assignment_test_table (test_id integer);
SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -7,9 +7,9 @@ SET citus.next_shard_id TO 1210000;
-- expect all shards to be dropped
--
CREATE TABLE test_truncate_append(a int);
SELECT master_create_distributed_table('test_truncate_append', 'a', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('test_truncate_append', 'a', 'append');
create_distributed_table
--------------------------
(1 row)
@ -75,9 +75,9 @@ DROP TABLE test_truncate_append;
-- expect shard to be present, data to be truncated
--
CREATE TABLE test_truncate_range(a int);
SELECT master_create_distributed_table('test_truncate_range', 'a', 'range');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('test_truncate_range', 'a', 'range');
create_distributed_table
--------------------------
(1 row)
@ -237,16 +237,11 @@ SELECT count(*) FROM test_truncate_hash;
DROP TABLE test_truncate_hash;
-- test with table with spaces in it
SET citus.shard_replication_factor TO 1;
CREATE TABLE "a b hash" (a int, b int);
SELECT master_create_distributed_table('"a b hash"', 'a', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('"a b hash"', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('"a b hash"', 'a', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -266,9 +261,9 @@ SELECT * from "a b hash";
DROP TABLE "a b hash";
-- now with append
CREATE TABLE "a b append" (a int, b int);
SELECT master_create_distributed_table('"a b append"', 'a', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('"a b append"', 'a', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -240,7 +240,7 @@ WHERE
logicalrelid = 'upgrade_reference_table_one_worker'::regclass;
partmethod | partkeyisnull | colocationid | repmodel
------------+---------------+--------------+----------
h | f | 1360001 | c
h | f | 1360000 | c
(1 row)
SELECT
@ -262,7 +262,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass);
colocationid | shardcount | replicationfactor | distributioncolumntype
--------------+------------+-------------------+------------------------
1360001 | 1 | 1 | 23
1360000 | 1 | 1 | 23
(1 row)
SELECT
@ -354,7 +354,7 @@ WHERE
logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass;
partmethod | partkeyisnull | colocationid | repmodel
------------+---------------+--------------+----------
h | f | 1360002 | c
h | f | 1360001 | c
(1 row)
SELECT
@ -376,7 +376,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass);
colocationid | shardcount | replicationfactor | distributioncolumntype
--------------+------------+-------------------+------------------------
1360002 | 1 | 2 | 23
1360001 | 1 | 2 | 23
(1 row)
SELECT
@ -468,7 +468,7 @@ WHERE
logicalrelid = 'upgrade_reference_table_both_healthy'::regclass;
partmethod | partkeyisnull | colocationid | repmodel
------------+---------------+--------------+----------
h | f | 1360003 | c
h | f | 1360002 | c
(1 row)
SELECT
@ -490,7 +490,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass);
colocationid | shardcount | replicationfactor | distributioncolumntype
--------------+------------+-------------------+------------------------
1360003 | 1 | 2 | 23
1360002 | 1 | 2 | 23
(1 row)
SELECT
@ -584,7 +584,7 @@ WHERE
logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass;
partmethod | partkeyisnull | colocationid | repmodel
------------+---------------+--------------+----------
h | f | 1360004 | c
h | f | 1360003 | c
(1 row)
SELECT
@ -606,7 +606,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass);
colocationid | shardcount | replicationfactor | distributioncolumntype
--------------+------------+-------------------+------------------------
1360004 | 1 | 1 | 23
1360003 | 1 | 1 | 23
(1 row)
SELECT
@ -639,7 +639,7 @@ WHERE
logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass;
partmethod | partkeyisnull | colocationid | repmodel
------------+---------------+--------------+----------
h | f | 1360004 | c
h | f | 1360003 | c
(1 row)
SELECT
@ -661,7 +661,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass);
colocationid | shardcount | replicationfactor | distributioncolumntype
--------------+------------+-------------------+------------------------
1360004 | 1 | 1 | 23
1360003 | 1 | 1 | 23
(1 row)
SELECT
@ -697,7 +697,7 @@ WHERE
logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass;
partmethod | partkeyisnull | colocationid | repmodel
------------+---------------+--------------+----------
h | f | 1360004 | c
h | f | 1360003 | c
(1 row)
SELECT
@ -719,7 +719,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass);
colocationid | shardcount | replicationfactor | distributioncolumntype
--------------+------------+-------------------+------------------------
1360004 | 1 | 1 | 23
1360003 | 1 | 1 | 23
(1 row)
SELECT
@ -823,7 +823,7 @@ WHERE
logicalrelid = 'upgrade_reference_table_mx'::regclass;
partmethod | partkeyisnull | colocationid | repmodel
------------+---------------+--------------+----------
h | f | 1360005 | s
h | f | 1360004 | s
(1 row)
SELECT
@ -845,7 +845,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass);
colocationid | shardcount | replicationfactor | distributioncolumntype
--------------+------------+-------------------+------------------------
1360005 | 1 | 1 | 23
1360004 | 1 | 1 | 23
(1 row)
SELECT
@ -875,7 +875,7 @@ WHERE
logicalrelid = 'upgrade_reference_table_mx'::regclass;
partmethod | partkeyisnull | colocationid | repmodel
------------+---------------+--------------+----------
h | f | 1360005 | s
h | f | 1360004 | s
(1 row)
SELECT
@ -897,7 +897,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass);
colocationid | shardcount | replicationfactor | distributioncolumntype
--------------+------------+-------------------+------------------------
1360005 | 1 | 1 | 23
1360004 | 1 | 1 | 23
(1 row)
SELECT
@ -944,7 +944,7 @@ WHERE
logicalrelid = 'upgrade_reference_table_mx'::regclass;
partmethod | partkeyisnull | colocationid | repmodel
------------+---------------+--------------+----------
h | f | 1360006 | c
h | f | 1360005 | c
(1 row)
SELECT
@ -966,7 +966,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass);
colocationid | shardcount | replicationfactor | distributioncolumntype
--------------+------------+-------------------+------------------------
1360006 | 1 | 2 | 23
1360005 | 1 | 2 | 23
(1 row)
SELECT

View File

@ -7,15 +7,9 @@ CREATE TABLE upsert_test
third_col int
);
-- distribute the table and create shards
SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('upsert_test', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('upsert_test', 'part_key', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -147,15 +141,9 @@ CREATE TABLE upsert_test_2
PRIMARY KEY (part_key, other_col)
);
-- distribute the table and create shards
SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('upsert_test_2', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -174,15 +162,9 @@ CREATE TABLE upsert_test_3
-- note that this is not a unique index
CREATE INDEX idx_ups_test ON upsert_test_3(part_key);
-- distribute the table and create shards
SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('upsert_test_3', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -196,15 +178,9 @@ CREATE TABLE upsert_test_4
count int
);
-- distribute the table and create shards
SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('upsert_test_4', '4', '2');
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -225,16 +201,11 @@ SELECT * FROM upsert_test_4;
(1 row)
-- now test dropped columns
SET citus.shard_replication_factor TO 1;
CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float);
SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('dropcol_distributed', 4, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('dropcol_distributed', 'key', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -10,16 +10,12 @@ SELECT substring(:'server_version', '\d+')::int > 10 as version_above_ten;
-- ===================================================================
-- test utility statement functionality
-- ===================================================================
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
CREATE TABLE sharded_table ( name text, id bigint );
SELECT master_create_distributed_table('sharded_table', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('sharded_table', 2, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('sharded_table', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -167,16 +163,12 @@ ERROR: no locks specified
DROP TABLE sharded_table;
-- VACUUM tests
-- create a table with a single shard (for convenience)
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
CREATE TABLE dustbunnies (id integer, name text, age integer);
SELECT master_create_distributed_table('dustbunnies', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('dustbunnies', 1, 2);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('dustbunnies', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -21,7 +21,7 @@ CREATE TABLE lineitem_range (
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null );
SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range');
SELECT create_distributed_table('lineitem_range', 'l_orderkey', 'range');
SELECT master_create_empty_shard('lineitem_range') AS new_shard_id
\gset
@ -88,8 +88,8 @@ CREATE TABLE lineitem_hash (
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null );
SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
SELECT master_create_worker_shards('lineitem_hash', 4, 1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'

View File

@ -18,7 +18,7 @@ CREATE TABLE aggregate_type (
float_value float(20) not null,
double_value float(40) not null,
interval_value interval not null);
SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append');
SELECT create_distributed_table('aggregate_type', 'float_value', 'append');
\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data'

View File

@ -28,7 +28,7 @@ CREATE TABLE lineitem_alter (
l_comment varchar(44) not null
)
WITH ( fillfactor = 80 );
SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-- verify that the storage options made it to the table definitions
@ -248,8 +248,9 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
-- Create single-shard table (to avoid deadlocks in the upcoming test hackery)
CREATE TABLE single_shard_items (id integer NOT NULL, name text);
SELECT master_create_distributed_table('single_shard_items', 'id', 'hash');
SELECT master_create_worker_shards('single_shard_items', 1, 2);
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('single_shard_items', 'id', 'hash');
-- Verify that ALTER TABLE .. REPLICATION IDENTITY [USING INDEX]* .. works
CREATE UNIQUE INDEX replica_idx on single_shard_items(id);
@ -346,8 +347,8 @@ RESET citus.multi_shard_commit_protocol;
-- verify that not any of shard placements are marked as failed when a query failure occurs
CREATE TABLE test_ab (a int, b int);
SELECT master_create_distributed_table('test_ab', 'a', 'hash');
SELECT master_create_worker_shards('test_ab', 8, 2);
SET citus.shard_count TO 8;
SELECT create_distributed_table('test_ab', 'a', 'hash');
INSERT INTO test_ab VALUES (2, 10);
INSERT INTO test_ab VALUES (2, 11);
CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a);
@ -462,6 +463,8 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alte
\c - - - :master_port
-- verify alter table and drop sequence in the same transaction does not cause deadlock
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
CREATE TABLE sequence_deadlock_test (a serial, b serial);
SELECT create_distributed_table('sequence_deadlock_test', 'a');

View File

@ -19,15 +19,16 @@ CREATE TABLE multi_append_table_to_shard_left
left_number INTEGER not null,
left_text TEXT not null
);
SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');
SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');
CREATE TABLE multi_append_table_to_shard_right_reference_hash
(
right_number INTEGER not null,
right_text TEXT not null
);
SELECT master_create_distributed_table('multi_append_table_to_shard_right_reference_hash', 'right_number', 'hash');
SELECT master_create_worker_shards('multi_append_table_to_shard_right_reference_hash', 1, 1);
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('multi_append_table_to_shard_right_reference_hash', 'right_number', 'hash');
-- Replicate 'left' table on both workers
SELECT set_config('citus.shard_replication_factor', '2', false);

View File

@ -4,7 +4,8 @@
SET citus.next_shard_id TO 240000;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
CREATE TABLE lineitem_hash (
l_orderkey bigint not null,
@ -25,8 +26,7 @@ CREATE TABLE lineitem_hash (
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
SELECT master_create_worker_shards('lineitem_hash', 8, 1);
SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'

View File

@ -632,7 +632,7 @@ COPY numbers_hash FROM STDIN WITH (FORMAT 'csv');
-- verify each placement is active
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport;
-- create a reference table
CREATE TABLE numbers_reference(a int, b int);
@ -647,7 +647,7 @@ CREATE TABLE numbers_hash_other(a int, b int);
SELECT create_distributed_table('numbers_hash_other', 'a');
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid, shardid, nodeport;
-- manually corrupt pg_dist_shard such that both copies of one shard is placed in
-- worker_1. This is to test the behavior when no replica of a shard is accessible.
@ -675,7 +675,7 @@ COPY numbers_hash FROM STDIN WITH (FORMAT 'csv');
-- verify shards in the first worker as marked invalid
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport;
-- try to insert into a reference table copy should fail
COPY numbers_reference FROM STDIN WITH (FORMAT 'csv');
@ -702,7 +702,7 @@ COPY numbers_hash_other FROM STDIN WITH (FORMAT 'csv');
-- since copy has failed altogether
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid, shardid, nodeport;
-- re-enable test_user on the first worker
\c - :default_user - :worker_1_port
@ -748,7 +748,7 @@ SELECT count(a) FROM numbers_hash;
-- verify shard is still marked as valid
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport;
DROP TABLE numbers_hash;
SELECT * FROM run_command_on_workers('DROP USER test_user');
@ -815,7 +815,7 @@ ABORT;
-- copy into a table with a JSONB column
CREATE TABLE copy_jsonb (key text, value jsonb, extra jsonb default '["default"]'::jsonb);
SELECT create_distributed_table('copy_jsonb', 'key');
SELECT create_distributed_table('copy_jsonb', 'key', colocate_with => 'none');
-- JSONB from text should work
\COPY copy_jsonb (key, value) FROM STDIN

View File

@ -8,7 +8,7 @@ CREATE TABLE nation (
n_name char(25) not null,
n_regionkey integer not null,
n_comment varchar(152));
SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append');
SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append');
\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'

View File

@ -147,7 +147,8 @@ test: multi_outer_join
# is independent from the rest of the group, it is added to increase parallelism.
# ---
test: multi_create_fdw
test: multi_complex_count_distinct multi_select_distinct multi_modifications
test: multi_complex_count_distinct multi_select_distinct
test: multi_modifications
test: multi_distribution_metadata
test: multi_generate_ddl_commands multi_create_shards multi_prune_shard_list multi_repair_shards
test: multi_upsert multi_simple_queries multi_create_insert_proxy multi_data_types

View File

@ -19,9 +19,9 @@ CREATE TABLE lineitem_range (
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null );
SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('lineitem_range', 'l_orderkey', 'range');
create_distributed_table
--------------------------
(1 row)
@ -127,15 +127,10 @@ CREATE TABLE lineitem_hash (
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null );
SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('lineitem_hash', 4, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -33,9 +33,9 @@ CREATE TABLE aggregate_type (
float_value float(20) not null,
double_value float(40) not null,
interval_value interval not null);
SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('aggregate_type', 'float_value', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -24,9 +24,9 @@ CREATE TABLE lineitem_alter (
l_comment varchar(44) not null
)
WITH ( fillfactor = 80 );
SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
create_distributed_table
--------------------------
(1 row)
@ -548,15 +548,11 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
-- Create single-shard table (to avoid deadlocks in the upcoming test hackery)
CREATE TABLE single_shard_items (id integer NOT NULL, name text);
SELECT master_create_distributed_table('single_shard_items', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('single_shard_items', 1, 2);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('single_shard_items', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -731,15 +727,10 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
RESET citus.multi_shard_commit_protocol;
-- verify that not any of shard placements are marked as failed when a query failure occurs
CREATE TABLE test_ab (a int, b int);
SELECT master_create_distributed_table('test_ab', 'a', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('test_ab', 8, 2);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 8;
SELECT create_distributed_table('test_ab', 'a', 'hash');
create_distributed_table
--------------------------
(1 row)
@ -958,6 +949,8 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alte
\c - - - :master_port
-- verify alter table and drop sequence in the same transaction does not cause deadlock
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
CREATE TABLE sequence_deadlock_test (a serial, b serial);
SELECT create_distributed_table('sequence_deadlock_test', 'a');
create_distributed_table

View File

@ -19,9 +19,9 @@ CREATE TABLE multi_append_table_to_shard_left
left_number INTEGER not null,
left_text TEXT not null
);
SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');
create_distributed_table
--------------------------
(1 row)
@ -30,15 +30,11 @@ CREATE TABLE multi_append_table_to_shard_right_reference_hash
right_number INTEGER not null,
right_text TEXT not null
);
SELECT master_create_distributed_table('multi_append_table_to_shard_right_reference_hash', 'right_number', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('multi_append_table_to_shard_right_reference_hash', 1, 1);
master_create_worker_shards
-----------------------------
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('multi_append_table_to_shard_right_reference_hash', 'right_number', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -2,6 +2,8 @@
-- COMPLEX_COUNT_DISTINCT
--
SET citus.next_shard_id TO 240000;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
CREATE TABLE lineitem_hash (
l_orderkey bigint not null,
l_partkey integer not null,
@ -21,15 +23,9 @@ CREATE TABLE lineitem_hash (
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('lineitem_hash', 8, 1);
master_create_worker_shards
-----------------------------
SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
create_distributed_table
--------------------------
(1 row)

View File

@ -826,17 +826,17 @@ COPY numbers_hash FROM STDIN WITH (FORMAT 'csv');
-- verify each placement is active
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport;
shardid | shardstate | nodename | nodeport
---------+------------+-----------+----------
560171 | 1 | localhost | 57637
560171 | 1 | localhost | 57638
560172 | 1 | localhost | 57638
560171 | 1 | localhost | 57637
560172 | 1 | localhost | 57637
560173 | 1 | localhost | 57637
560172 | 1 | localhost | 57638
560173 | 1 | localhost | 57638
560174 | 1 | localhost | 57638
560173 | 1 | localhost | 57637
560174 | 1 | localhost | 57637
560174 | 1 | localhost | 57638
(8 rows)
-- create a reference table
@ -858,17 +858,17 @@ SELECT create_distributed_table('numbers_hash_other', 'a');
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid, shardid, nodeport;
shardid | shardstate | nodename | nodeport
---------+------------+-----------+----------
560176 | 1 | localhost | 57638
560176 | 1 | localhost | 57637
560177 | 1 | localhost | 57637
560176 | 1 | localhost | 57638
560177 | 1 | localhost | 57638
560178 | 1 | localhost | 57638
560177 | 1 | localhost | 57637
560178 | 1 | localhost | 57637
560179 | 1 | localhost | 57637
560178 | 1 | localhost | 57638
560179 | 1 | localhost | 57638
560179 | 1 | localhost | 57637
(8 rows)
-- manually corrupt pg_dist_shard such that both copies of one shard is placed in
@ -897,17 +897,17 @@ CONTEXT: COPY numbers_hash, line 6: "6,6"
-- verify shards in the first worker as marked invalid
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport;
shardid | shardstate | nodename | nodeport
---------+------------+-----------+----------
560171 | 3 | localhost | 57637
560171 | 1 | localhost | 57638
560172 | 1 | localhost | 57638
560171 | 3 | localhost | 57637
560172 | 3 | localhost | 57637
560173 | 3 | localhost | 57637
560172 | 1 | localhost | 57638
560173 | 1 | localhost | 57638
560174 | 1 | localhost | 57638
560173 | 3 | localhost | 57637
560174 | 3 | localhost | 57637
560174 | 1 | localhost | 57638
(8 rows)
-- try to insert into a reference table copy should fail
@ -941,17 +941,17 @@ CONTEXT: COPY numbers_hash_other, line 1: "1,1"
-- since copy has failed altogether
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid, shardid, nodeport;
shardid | shardstate | nodename | nodeport
---------+------------+-----------+----------
560176 | 1 | localhost | 57637
560176 | 1 | localhost | 57637
560177 | 1 | localhost | 57637
560177 | 1 | localhost | 57638
560178 | 1 | localhost | 57638
560177 | 1 | localhost | 57637
560178 | 1 | localhost | 57637
560179 | 1 | localhost | 57637
560178 | 1 | localhost | 57638
560179 | 1 | localhost | 57638
560179 | 1 | localhost | 57637
(8 rows)
-- re-enable test_user on the first worker
@ -993,17 +993,17 @@ SELECT count(a) FROM numbers_hash;
-- verify shard is still marked as valid
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid;
WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport;
shardid | shardstate | nodename | nodeport
---------+------------+-----------+----------
560180 | 1 | localhost | 57637
560180 | 1 | localhost | 57638
560181 | 1 | localhost | 57638
560180 | 1 | localhost | 57637
560181 | 1 | localhost | 57637
560182 | 1 | localhost | 57637
560181 | 1 | localhost | 57638
560182 | 1 | localhost | 57638
560183 | 1 | localhost | 57638
560182 | 1 | localhost | 57637
560183 | 1 | localhost | 57637
560183 | 1 | localhost | 57638
(8 rows)
DROP TABLE numbers_hash;
@ -1094,7 +1094,7 @@ NOTICE: Copying data from local table...
ABORT;
-- copy into a table with a JSONB column
CREATE TABLE copy_jsonb (key text, value jsonb, extra jsonb default '["default"]'::jsonb);
SELECT create_distributed_table('copy_jsonb', 'key');
SELECT create_distributed_table('copy_jsonb', 'key', colocate_with => 'none');
create_distributed_table
--------------------------

View File

@ -5,9 +5,9 @@ CREATE TABLE nation (
n_name char(25) not null,
n_regionkey integer not null,
n_comment varchar(152));
SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append');
master_create_distributed_table
---------------------------------
SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append');
create_distributed_table
--------------------------
(1 row)

View File

@ -68,6 +68,8 @@ SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM linei
-- Check that approximate count(distinct) works at a table in a schema other than public
-- create necessary objects
SET citus.next_shard_id TO 20000000;
SET citus.next_placement_id TO 20000000;
CREATE SCHEMA test_count_distinct_schema;
CREATE TABLE test_count_distinct_schema.nation_hash(
@ -76,8 +78,7 @@ CREATE TABLE test_count_distinct_schema.nation_hash(
n_regionkey integer not null,
n_comment varchar(152)
);
SELECT master_create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, 2);
SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash');
\copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|';
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai

View File

@ -1,9 +1,10 @@
SET citus.next_shard_id TO 1601000;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
CREATE TABLE tab9 (test_id integer NOT NULL, data int);
CREATE TABLE tab10 (test_id integer NOT NULL, data int);
SELECT master_create_distributed_table('tab9', 'test_id', 'hash');
SELECT create_distributed_table('tab9', 'test_id', 'hash');
SELECT master_create_distributed_table('tab10', 'test_id', 'hash');
SELECT master_create_worker_shards('tab9', 1, 1);
TRUNCATE tab9;
UPDATE pg_dist_shard SET logicalrelid = 'tab10'::regclass WHERE logicalrelid = 'tab9'::regclass;
TRUNCATE tab10;

View File

@ -208,9 +208,9 @@ SELECT * FROM run_command_on_workers('select 1') ORDER BY 2 ASC;
SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') ORDER BY 2 ASC;
-- make sure run_on_all_placements respects shardstate
SET citus.shard_count TO 5;
CREATE TABLE check_placements (key int);
SELECT master_create_distributed_table('check_placements', 'key', 'hash');
SELECT master_create_worker_shards('check_placements', 5, 2);
SELECT create_distributed_table('check_placements', 'key', 'hash');
SELECT * FROM run_command_on_placements('check_placements', 'select 1');
UPDATE pg_dist_shard_placement SET shardstate = 3
WHERE shardid % 2 = 0 AND nodeport = :worker_1_port;
@ -219,11 +219,11 @@ DROP TABLE check_placements CASCADE;
-- make sure run_on_all_colocated_placements correctly detects colocation
CREATE TABLE check_colocated (key int);
SELECT master_create_distributed_table('check_colocated', 'key', 'hash');
SELECT master_create_worker_shards('check_colocated', 5, 2);
SELECT create_distributed_table('check_colocated', 'key', 'hash');
CREATE TABLE second_table (key int);
SELECT master_create_distributed_table('second_table', 'key', 'hash');
SELECT master_create_worker_shards('second_table', 4, 2);
SET citus.shard_count TO 4;
SELECT create_distributed_table('second_table', 'key', 'hash');
SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table',
'select 1');
-- even when the difference is in replication factor, an error is thrown
@ -256,9 +256,10 @@ DROP TABLE check_colocated CASCADE;
DROP TABLE second_table CASCADE;
-- runs on all shards
SET citus.shard_count TO 5;
CREATE TABLE check_shards (key int);
SELECT master_create_distributed_table('check_shards', 'key', 'hash');
SELECT master_create_worker_shards('check_shards', 5, 2);
SELECT create_distributed_table('check_shards', 'key', 'hash');
SELECT * FROM run_command_on_shards('check_shards', 'select 1');
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0;
SELECT * FROM run_command_on_shards('check_shards', 'select 1');

View File

@ -32,10 +32,12 @@ SELECT master_disable_node('localhost', :worker_2_port);
SELECT master_get_active_worker_nodes();
-- add some shard placements to the cluster
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
SELECT isactive FROM master_activate_node('localhost', :worker_2_port);
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT master_create_distributed_table('cluster_management_test', 'col_1', 'hash');
SELECT master_create_worker_shards('cluster_management_test', 16, 1);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
-- see that there are some active placements in the candidate node
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
@ -152,6 +154,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_1_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-- check that a distributed table can be created after adding a node in a transaction
SET citus.shard_count TO 4;
SELECT master_remove_node('localhost', :worker_2_port);
BEGIN;

View File

@ -50,9 +50,10 @@ CREATE TABLE insert_target (
-- squelch WARNINGs that contain worker_port
SET client_min_messages TO ERROR;
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
SELECT master_create_distributed_table('insert_target', 'id', 'hash');
SELECT master_create_worker_shards('insert_target', 2, 1);
SELECT create_distributed_table('insert_target', 'id', 'hash');
CREATE TEMPORARY SEQUENCE rows_inserted;
SELECT create_insert_proxy_for_table('insert_target', 'rows_inserted') AS proxy_tablename

View File

@ -53,25 +53,25 @@ CREATE TABLE table_to_distribute (
-- use the table WITH (OIDS) set
ALTER TABLE table_to_distribute SET WITH OIDS;
SELECT master_create_distributed_table('table_to_distribute', 'id', 'hash');
SELECT create_distributed_table('table_to_distribute', 'id', 'hash');
-- revert WITH (OIDS) from above
ALTER TABLE table_to_distribute SET WITHOUT OIDS;
-- use an index instead of table name
SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
-- use a bad column name
SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash');
SELECT create_distributed_table('table_to_distribute', 'bad_column', 'hash');
-- use unrecognized partition type
SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized');
SELECT create_distributed_table('table_to_distribute', 'name', 'unrecognized');
-- use a partition column of a type lacking any default operator class
SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash');
SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash');
-- use a partition column of type lacking the required support function (hash)
SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
-- distribute table and inspect side effects
SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash');
@ -119,8 +119,9 @@ CREATE FOREIGN TABLE foreign_table_to_distribute
)
SERVER fake_fdw_server;
SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1);
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard
WHERE logicalrelid = 'foreign_table_to_distribute'::regclass
@ -133,8 +134,8 @@ CREATE TABLE weird_shard_count
id bigint
);
SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash');
SELECT master_create_worker_shards('weird_shard_count', 7, 1);
SET citus.shard_count TO 7;
SELECT create_distributed_table('weird_shard_count', 'id', 'hash');
-- Citus ensures all shards are roughly the same size
SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size

View File

@ -28,7 +28,7 @@ CREATE TABLE lineitem (
l_shipmode char(10) not null,
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');
CREATE INDEX lineitem_time_index ON lineitem (l_shipdate);
@ -43,7 +43,7 @@ CREATE TABLE orders (
o_shippriority integer not null,
o_comment varchar(79) not null,
PRIMARY KEY(o_orderkey) );
SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
SELECT create_distributed_table('orders', 'o_orderkey', 'append');
CREATE TABLE orders_reference (
o_orderkey bigint not null,
@ -79,7 +79,7 @@ CREATE TABLE customer_append (
c_acctbal decimal(15,2) not null,
c_mktsegment char(10) not null,
c_comment varchar(117) not null);
SELECT master_create_distributed_table('customer_append', 'c_custkey', 'append');
SELECT create_distributed_table('customer_append', 'c_custkey', 'append');
CREATE TABLE nation (
n_nationkey integer not null,
@ -111,7 +111,7 @@ CREATE TABLE part_append (
p_container char(10) not null,
p_retailprice decimal(15,2) not null,
p_comment varchar(23) not null);
SELECT master_create_distributed_table('part_append', 'p_partkey', 'append');
SELECT create_distributed_table('part_append', 'p_partkey', 'append');
CREATE TABLE supplier
(
@ -137,7 +137,7 @@ CREATE TABLE supplier_single_shard
s_acctbal decimal(15,2) not null,
s_comment varchar(101) not null
);
SELECT master_create_distributed_table('supplier_single_shard', 's_suppkey', 'append');
SELECT create_distributed_table('supplier_single_shard', 's_suppkey', 'append');
CREATE TABLE mx_table_test (col1 int, col2 text);

View File

@ -11,7 +11,7 @@ CREATE TABLE uniq_cns_append_tables
partition_col integer UNIQUE,
other_col integer
);
SELECT master_create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append');
SELECT create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append');
CREATE TABLE excl_cns_append_tables
(
@ -19,7 +19,7 @@ CREATE TABLE excl_cns_append_tables
other_col integer,
EXCLUDE (partition_col WITH =)
);
SELECT master_create_distributed_table('excl_cns_append_tables', 'partition_col', 'append');
SELECT create_distributed_table('excl_cns_append_tables', 'partition_col', 'append');
-- test that Citus cannot distribute unique constraints that do not include
-- the partition column on hash-partitioned tables.
@ -29,14 +29,14 @@ CREATE TABLE pk_on_non_part_col
partition_col integer,
other_col integer PRIMARY KEY
);
SELECT master_create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash');
SELECT create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash');
CREATE TABLE uq_on_non_part_col
(
partition_col integer,
other_col integer UNIQUE
);
SELECT master_create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash');
SELECT create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash');
CREATE TABLE ex_on_non_part_col
(
@ -44,7 +44,7 @@ CREATE TABLE ex_on_non_part_col
other_col integer,
EXCLUDE (other_col WITH =)
);
SELECT master_create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash');
SELECT create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash');
-- now show that Citus can distribute unique and EXCLUDE constraints that
-- include the partition column for hash-partitioned tables.
@ -57,14 +57,14 @@ CREATE TABLE pk_on_part_col
partition_col integer PRIMARY KEY,
other_col integer
);
SELECT master_create_distributed_table('pk_on_part_col', 'partition_col', 'hash');
SELECT create_distributed_table('pk_on_part_col', 'partition_col', 'hash');
CREATE TABLE uq_part_col
(
partition_col integer UNIQUE,
other_col integer
);
SELECT master_create_distributed_table('uq_part_col', 'partition_col', 'hash');
SELECT create_distributed_table('uq_part_col', 'partition_col', 'hash');
CREATE TABLE uq_two_columns
(
@ -72,8 +72,7 @@ CREATE TABLE uq_two_columns
other_col integer,
UNIQUE (partition_col, other_col)
);
SELECT master_create_distributed_table('uq_two_columns', 'partition_col', 'hash');
SELECT master_create_worker_shards('uq_two_columns', '4', '2');
SELECT create_distributed_table('uq_two_columns', 'partition_col', 'hash');
INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1);
INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1);
@ -83,8 +82,7 @@ CREATE TABLE ex_on_part_col
other_col integer,
EXCLUDE (partition_col WITH =)
);
SELECT master_create_distributed_table('ex_on_part_col', 'partition_col', 'hash');
SELECT master_create_worker_shards('ex_on_part_col', '4', '2');
SELECT create_distributed_table('ex_on_part_col', 'partition_col', 'hash');
INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,2);
@ -94,8 +92,7 @@ CREATE TABLE ex_on_two_columns
other_col integer,
EXCLUDE (partition_col WITH =, other_col WITH =)
);
SELECT master_create_distributed_table('ex_on_two_columns', 'partition_col', 'hash');
SELECT master_create_worker_shards('ex_on_two_columns', '4', '2');
SELECT create_distributed_table('ex_on_two_columns', 'partition_col', 'hash');
INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1);
@ -105,8 +102,7 @@ CREATE TABLE ex_on_two_columns_prt
other_col integer,
EXCLUDE (partition_col WITH =, other_col WITH =) WHERE (other_col > 100)
);
SELECT master_create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash');
SELECT master_create_worker_shards('ex_on_two_columns_prt', '4', '2');
SELECT create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash');
INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101);
@ -118,7 +114,7 @@ CREATE TABLE ex_wrong_operator
other_col tsrange,
EXCLUDE USING gist (other_col WITH =, partition_col WITH &&)
);
SELECT master_create_distributed_table('ex_wrong_operator', 'partition_col', 'hash');
SELECT create_distributed_table('ex_wrong_operator', 'partition_col', 'hash');
CREATE TABLE ex_overlaps
(
@ -126,8 +122,7 @@ CREATE TABLE ex_overlaps
other_col tsrange,
EXCLUDE USING gist (other_col WITH &&, partition_col WITH =)
);
SELECT master_create_distributed_table('ex_overlaps', 'partition_col', 'hash');
SELECT master_create_worker_shards('ex_overlaps', '4', '2');
SELECT create_distributed_table('ex_overlaps', 'partition_col', 'hash');
INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]');
INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]');
@ -142,14 +137,14 @@ CREATE TABLE pk_on_part_col_named
partition_col integer CONSTRAINT pk_on_part_col_named_pk PRIMARY KEY,
other_col integer
);
SELECT master_create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash');
SELECT create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash');
CREATE TABLE uq_part_col_named
(
partition_col integer CONSTRAINT uq_part_col_named_uniq UNIQUE,
other_col integer
);
SELECT master_create_distributed_table('uq_part_col_named', 'partition_col', 'hash');
SELECT create_distributed_table('uq_part_col_named', 'partition_col', 'hash');
CREATE TABLE uq_two_columns_named
(
@ -157,8 +152,7 @@ CREATE TABLE uq_two_columns_named
other_col integer,
CONSTRAINT uq_two_columns_named_uniq UNIQUE (partition_col, other_col)
);
SELECT master_create_distributed_table('uq_two_columns_named', 'partition_col', 'hash');
SELECT master_create_worker_shards('uq_two_columns_named', '4', '2');
SELECT create_distributed_table('uq_two_columns_named', 'partition_col', 'hash');
INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1);
INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1);
@ -168,8 +162,7 @@ CREATE TABLE ex_on_part_col_named
other_col integer,
CONSTRAINT ex_on_part_col_named_exclude EXCLUDE (partition_col WITH =)
);
SELECT master_create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash');
SELECT master_create_worker_shards('ex_on_part_col_named', '4', '2');
SELECT create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash');
INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,2);
@ -179,8 +172,7 @@ CREATE TABLE ex_on_two_columns_named
other_col integer,
CONSTRAINT ex_on_two_columns_named_exclude EXCLUDE (partition_col WITH =, other_col WITH =)
);
SELECT master_create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash');
SELECT master_create_worker_shards('ex_on_two_columns_named', '4', '2');
SELECT create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash');
INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1);
INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1);
@ -192,8 +184,7 @@ CREATE TABLE ex_multiple_excludes
CONSTRAINT ex_multiple_excludes_excl1 EXCLUDE (partition_col WITH =, other_col WITH =),
CONSTRAINT ex_multiple_excludes_excl2 EXCLUDE (partition_col WITH =, other_other_col WITH =)
);
SELECT master_create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash');
SELECT master_create_worker_shards('ex_multiple_excludes', '4', '2');
SELECT create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash');
INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,1);
INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,2);
INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,2,1);
@ -204,7 +195,7 @@ CREATE TABLE ex_wrong_operator_named
other_col tsrange,
CONSTRAINT ex_wrong_operator_named_exclude EXCLUDE USING gist (other_col WITH =, partition_col WITH &&)
);
SELECT master_create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash');
SELECT create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash');
CREATE TABLE ex_overlaps_named
(
@ -212,8 +203,7 @@ CREATE TABLE ex_overlaps_named
other_col tsrange,
CONSTRAINT ex_overlaps_operator_named_exclude EXCLUDE USING gist (other_col WITH &&, partition_col WITH =)
);
SELECT master_create_distributed_table('ex_overlaps_named', 'partition_col', 'hash');
SELECT master_create_worker_shards('ex_overlaps_named', '4', '2');
SELECT create_distributed_table('ex_overlaps_named', 'partition_col', 'hash');
INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]');
INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]');
@ -224,7 +214,7 @@ CREATE TABLE uq_range_tables
partition_col integer UNIQUE,
other_col integer
);
SELECT master_create_distributed_table('uq_range_tables', 'partition_col', 'range');
SELECT create_distributed_table('uq_range_tables', 'partition_col', 'range');
-- show that CHECK constraints are distributed.
CREATE TABLE check_example
@ -233,12 +223,10 @@ CREATE TABLE check_example
other_col integer CHECK (other_col >= 100),
other_other_col integer CHECK (abs(other_other_col) >= 100)
);
SELECT master_create_distributed_table('check_example', 'partition_col', 'hash');
SELECT master_create_worker_shards('check_example', '2', '2');
SELECT create_distributed_table('check_example', 'partition_col', 'hash');
\c - - - :worker_1_port
\d check_example_partition_col_key_365040
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365040'::regclass;
\d check_example_partition_col_key_365056
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365056'::regclass;
\c - - - :master_port
-- drop unnecessary tables

View File

@ -13,7 +13,7 @@ CREATE TABLE table_identity_col (
id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
payload text );
SELECT master_create_distributed_table('table_identity_col', 'id', 'append');
SELECT create_distributed_table('table_identity_col', 'id', 'append');
SELECT create_distributed_table('table_identity_col', 'id');
SELECT create_distributed_table('table_identity_col', 'text');

View File

@ -18,11 +18,11 @@ INSERT INTO multi_task_table VALUES(3, 'elem_3');
-- Shouldn't log anything when the log level is 'off'
SHOW citus.multi_task_query_log_level;
SELECT * FROM multi_task_table;
SELECT * FROM multi_task_table ORDER BY 1;
-- Get messages with the log level 'notice'
SET citus.multi_task_query_log_level TO notice;
SELECT * FROM multi_task_table;
SELECT * FROM multi_task_table ORDER BY 1;
SELECT AVG(id) AS avg_id FROM multi_task_table;
-- Get messages with the log level 'error'
@ -65,7 +65,7 @@ INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table WHERE id =
-- Should have four rows (three rows from the query without where and the one from with where)
SET citus.multi_task_query_log_level to DEFAULT;
SELECT * FROM summary_table;
SELECT * FROM summary_table ORDER BY 1,2;
-- Set log-level to different levels inside the transaction
BEGIN;
@ -80,7 +80,7 @@ ROLLBACK;
-- Should have only four rows since the transaction is rollbacked.
SET citus.multi_task_query_log_level to DEFAULT;
SELECT * FROM summary_table;
SELECT * FROM summary_table ORDER BY 1,2;
-- Test router-select query
SET citus.multi_task_query_log_level TO notice;

View File

@ -65,9 +65,8 @@ CREATE TABLE composite_type_partitioned_table
col test_composite_type
);
SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
-- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table
INSERT INTO composite_type_partitioned_table VALUES (1, '(1, 2)'::test_composite_type);
@ -91,9 +90,7 @@ CREATE TABLE bugs (
status bug_status
);
SELECT master_create_distributed_table('bugs', 'status', 'hash');
SELECT master_create_worker_shards('bugs', 4, 1);
SELECT create_distributed_table('bugs', 'status', 'hash');
-- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table
INSERT INTO bugs VALUES (1, 'new');
@ -115,8 +112,7 @@ CREATE TABLE varchar_hash_partitioned_table
name varchar
);
SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1);
SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
-- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table
INSERT INTO varchar_hash_partitioned_table VALUES (1, 'Jason');

View File

@ -4,6 +4,7 @@
SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;
CREATE FUNCTION deparse_shard_query_test(text)
RETURNS VOID
@ -23,8 +24,7 @@ CREATE TABLE raw_events_1
event_at date DEfAULT now()
);
SELECT master_create_distributed_table('raw_events_1', 'tenant_id', 'hash');
SELECT master_create_worker_shards('raw_events_1', 4, 1);
SELECT create_distributed_table('raw_events_1', 'tenant_id', 'hash');
-- create the first table
CREATE TABLE raw_events_2
@ -39,8 +39,7 @@ CREATE TABLE raw_events_2
event_at date DEfAULT now()
);
SELECT master_create_distributed_table('raw_events_2', 'tenant_id', 'hash');
SELECT master_create_worker_shards('raw_events_2', 4, 1);
SELECT create_distributed_table('raw_events_2', 'tenant_id', 'hash');
CREATE TABLE aggregated_events
(tenant_id bigint,
@ -52,8 +51,7 @@ CREATE TABLE aggregated_events
average_value_6 int,
rollup_hour date);
SELECT master_create_distributed_table('aggregated_events', 'tenant_id', 'hash');
SELECT master_create_worker_shards('aggregated_events', 4, 1);
SELECT create_distributed_table('aggregated_events', 'tenant_id', 'hash');
-- start with very simple examples on a single table

View File

@ -60,10 +60,7 @@ CREATE TABLE events_hash (
id bigint,
name text
);
SELECT master_create_distributed_table('events_hash', 'name', 'hash');
-- create worker shards
SELECT master_create_worker_shards('events_hash', 4, 2);
SELECT create_distributed_table('events_hash', 'name', 'hash');
-- set shardstate of one replication from each shard to 0 (invalid value)
UPDATE pg_dist_placement SET shardstate = 0 WHERE shardid BETWEEN 540000 AND 540003

View File

@ -8,7 +8,7 @@ SET citus.next_shard_id TO 550000;
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
-- this emits a NOTICE message for every table we are dropping with our CASCADE. It would
-- be nice to check that we get those NOTICE messages, but it's nicer to not have to
@ -26,7 +26,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-- verify that a table can be created after the extension has been dropped and recreated
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
SELECT 1 FROM master_create_empty_shard('testtableddl');
SELECT * FROM testtableddl;
DROP TABLE testtableddl;

View File

@ -5,10 +5,6 @@
-- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
-- tables.
SET citus.next_shard_id TO 640000;
--
-- CREATE TEST TABLES
--
@ -16,16 +12,17 @@ SET citus.next_shard_id TO 640000;
SET citus.next_shard_id TO 102080;
CREATE TABLE index_test_range(a int, b int, c int);
SELECT master_create_distributed_table('index_test_range', 'a', 'range');
SELECT create_distributed_table('index_test_range', 'a', 'range');
SELECT master_create_empty_shard('index_test_range');
SELECT master_create_empty_shard('index_test_range');
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 2;
CREATE TABLE index_test_hash(a int, b int, c int);
SELECT master_create_distributed_table('index_test_hash', 'a', 'hash');
SELECT master_create_worker_shards('index_test_hash', 8, 2);
SELECT create_distributed_table('index_test_hash', 'a', 'hash');
CREATE TABLE index_test_append(a int, b int, c int);
SELECT master_create_distributed_table('index_test_append', 'a', 'append');
SELECT create_distributed_table('index_test_append', 'a', 'append');
SELECT master_create_empty_shard('index_test_append');
SELECT master_create_empty_shard('index_test_append');

View File

@ -1742,7 +1742,7 @@ TRUNCATE raw_events_first;
BEGIN;
INSERT INTO raw_events_first (user_id, value_1)
SELECT s, s FROM generate_series(1, 5) s;
SELECT user_id, value_1 FROM raw_events_first;
SELECT user_id, value_1 FROM raw_events_first ORDER BY 1;
ROLLBACK;
-- INSERT ... SELECT and single-shard SELECT in the same transaction is supported

View File

@ -28,17 +28,17 @@ CREATE TABLE insufficient_shards ( LIKE limit_orders );
CREATE TABLE range_partitioned ( LIKE limit_orders );
CREATE TABLE append_partitioned ( LIKE limit_orders );
SELECT master_create_distributed_table('limit_orders', 'id', 'hash');
SELECT master_create_distributed_table('multiple_hash', 'category', 'hash');
SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash');
SELECT master_create_distributed_table('range_partitioned', 'id', 'range');
SELECT master_create_distributed_table('append_partitioned', 'id', 'append');
SET citus.shard_count TO 2;
SELECT master_create_worker_shards('limit_orders', 2, 2);
SELECT master_create_worker_shards('multiple_hash', 2, 2);
SELECT create_distributed_table('limit_orders', 'id', 'hash');
SELECT create_distributed_table('multiple_hash', 'id', 'hash');
SELECT create_distributed_table('range_partitioned', 'id', 'range');
SELECT create_distributed_table('append_partitioned', 'id', 'append');
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
-- make a single shard that covers no partition values
SELECT master_create_worker_shards('insufficient_shards', 1, 1);
SELECT create_distributed_table('insufficient_shards', 'id', 'hash');
UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0
WHERE logicalrelid = 'insufficient_shards'::regclass;
@ -309,7 +309,7 @@ UPDATE limit_orders SET limit_price = 0.00 FROM bidders
limit_orders.bidder_id = bidders.id AND
bidders.name = 'Bernie Madoff';
-- the connection used for the INSERT is claimed by pull-push, causing the UPDATE to fail
-- should succeed with a CTE
WITH deleted_orders AS (INSERT INTO limit_orders VALUES (399, 'PDR', 14, '2017-07-02 16:32:15', 'sell', 43))
UPDATE limit_orders SET symbol = 'GM';
@ -420,8 +420,8 @@ SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data;
-- verify interaction of default values, SERIAL, and RETURNING
\set QUIET on
CREATE TABLE app_analytics_events (id serial, app_id integer, name text);
SELECT master_create_distributed_table('app_analytics_events', 'app_id', 'hash');
SELECT master_create_worker_shards('app_analytics_events', 4, 1);
SET citus.shard_count TO 4;
SELECT create_distributed_table('app_analytics_events', 'app_id', 'hash');
INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id;
INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id;
@ -468,7 +468,7 @@ DROP TABLE app_analytics_events;
-- Test multi-row insert with a dropped column before the partition column
CREATE TABLE app_analytics_events (id int default 3, app_id integer, name text);
SELECT create_distributed_table('app_analytics_events', 'name');
SELECT create_distributed_table('app_analytics_events', 'name', colocate_with => 'none');
ALTER TABLE app_analytics_events DROP COLUMN app_id;

View File

@ -5,6 +5,7 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000;
SET citus.multi_shard_commit_protocol = '2pc';
SET citus.shard_count TO 2;
-- Verify that a table name > 56 characters gets hashed properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
@ -17,6 +18,9 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345
\dt too_long_*
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
-- Verify that the UDF works and rejects bad arguments.
SELECT shard_name(NULL, 666666);
SELECT shard_name(0, 666666);
@ -34,8 +38,7 @@ CREATE TABLE name_lengths (
constraint constraint_a UNIQUE (col1)
);
SELECT master_create_distributed_table('name_lengths', 'col1', 'hash');
SELECT master_create_worker_shards('name_lengths', '2', '2');
SELECT create_distributed_table('name_lengths', 'col1', 'hash');
-- Verify that we CAN add columns with "too-long names", because
-- the columns' names are not extended in the corresponding shard tables.
@ -82,6 +85,9 @@ CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890123456789
\d tmp_idx_*
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
-- Verify that distributed tables with too-long names
-- for CHECK constraints are no trouble.
CREATE TABLE sneaky_name_lengths (
@ -90,8 +96,7 @@ CREATE TABLE sneaky_name_lengths (
int_col_12345678901234567890123456789012345678901234567890 integer not null,
CHECK (int_col_12345678901234567890123456789012345678901234567890 > 100)
);
SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
DROP TABLE sneaky_name_lengths CASCADE;
CREATE TABLE sneaky_name_lengths (
@ -111,6 +116,9 @@ SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass;
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
DROP TABLE sneaky_name_lengths CASCADE;
-- verify that named constraint with too-long name gets hashed properly
@ -120,13 +128,15 @@ CREATE TABLE sneaky_name_lengths (
int_col_12345678901234567890123456789012345678901234567890 integer not null,
constraint unique_12345678901234567890123456789012345678901234567890 UNIQUE (col1)
);
SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
\c - - - :worker_1_port
\di unique*225008
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
DROP TABLE sneaky_name_lengths CASCADE;
-- Verify that much larger shardIds are handled properly
@ -134,21 +144,22 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 2250000000000;
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
col2 integer not null);
SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2');
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
\c - - - :worker_1_port
\dt *225000000000*
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE;
-- Verify that multi-byte boundaries are respected for databases with UTF8 encoding.
CREATE TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D" UESCAPE '!' (
col1 integer not null PRIMARY KEY,
col2 integer not null);
SELECT master_create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash');
SELECT master_create_worker_shards(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', '2', '2');
SELECT create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash');
-- Verify that quoting is used in shard_name
SELECT shard_name(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass, min(shardid))
@ -160,13 +171,15 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0
\di public.elephant_*
\c - - - :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
-- Verify that shard_name UDF supports schemas
CREATE SCHEMA multi_name_lengths;
CREATE TABLE multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
col2 integer not null);
SELECT master_create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
SELECT master_create_worker_shards('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 2, 1);
SELECT create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
SELECT shard_name('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass, min(shardid))
FROM pg_dist_shard

View File

@ -38,7 +38,7 @@ CREATE TABLE varchar_partitioned_table
(
varchar_column varchar(100)
);
SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
-- Create logical shards and shard placements with shardid 100,101
@ -67,7 +67,7 @@ CREATE TABLE array_partitioned_table
(
array_column text[]
);
SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append');
SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append');
SET client_min_messages TO DEBUG2;
-- Create logical shard with shardid 102, 103
@ -105,7 +105,7 @@ CREATE TABLE composite_partitioned_table
(
composite_column composite_type
);
SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
SET client_min_messages TO DEBUG2;
-- Create logical shard with shardid 104, 105

View File

@ -187,8 +187,8 @@ CREATE TABLE plpgsql_table (
key int,
value int
);
SELECT master_create_distributed_table('plpgsql_table','key','hash');
SELECT master_create_worker_shards('plpgsql_table',4,1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('plpgsql_table','key','hash');
CREATE FUNCTION no_parameter_insert() RETURNS void as $$
BEGIN

View File

@ -160,9 +160,8 @@ CREATE TABLE router_executor_table (
comment varchar(20),
stats test_composite_type
);
SELECT master_create_distributed_table('router_executor_table', 'id', 'hash');
SELECT master_create_worker_shards('router_executor_table', 2, 2);
SET citus.shard_count TO 2;
SELECT create_distributed_table('router_executor_table', 'id', 'hash');
-- test parameterized inserts
PREPARE prepared_insert(varchar(20)) AS
@ -209,8 +208,9 @@ CREATE TABLE prepare_table (
key int,
value int
);
SELECT master_create_distributed_table('prepare_table','key','hash');
SELECT master_create_worker_shards('prepare_table',4,1);
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('prepare_table','key','hash');
PREPARE prepared_no_parameter_insert AS
INSERT INTO prepare_table (key) VALUES (0);
@ -581,8 +581,9 @@ CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IM
-- test table
CREATE TABLE test_table (test_id integer NOT NULL, data text);
SELECT master_create_distributed_table('test_table', 'test_id', 'hash');
SELECT master_create_worker_shards('test_table', 2, 2);
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('test_table', 'test_id', 'hash');
-- avoid 9.6+ only context messages
\set VERBOSITY terse

View File

@ -42,10 +42,8 @@ CREATE FUNCTION print_sorted_shard_intervals(regclass)
-- create distributed table observe shard pruning
CREATE TABLE pruning ( species text, last_pruned date, plant_id integer );
SELECT master_create_distributed_table('pruning', 'species', 'hash');
-- create worker shards
SELECT master_create_worker_shards('pruning', 4, 1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('pruning', 'species', 'hash');
-- with no values, expect all shards
SELECT prune_using_no_values('pruning');
@ -89,7 +87,7 @@ SELECT print_sorted_shard_intervals('pruning');
-- create range distributed table observe shard pruning
CREATE TABLE pruning_range ( species text, last_pruned date, plant_id integer );
SELECT master_create_distributed_table('pruning_range', 'species', 'range');
SELECT create_distributed_table('pruning_range', 'species', 'range');
-- create worker shards
SELECT master_create_empty_shard('pruning_range');

View File

@ -15,10 +15,10 @@ CREATE INDEX ON customer_engagements (created_at);
CREATE INDEX ON customer_engagements (event_data);
-- distribute the table
SELECT master_create_distributed_table('customer_engagements', 'id', 'hash');
-- create a single shard on the first worker
SELECT master_create_worker_shards('customer_engagements', 1, 2);
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('customer_engagements', 'id', 'hash');
-- ingest some data for the tests
INSERT INTO customer_engagements VALUES (1, '01-01-2015', 'first event');
@ -92,10 +92,10 @@ CREATE FOREIGN TABLE remote_engagements (
) SERVER fake_fdw_server;
-- distribute the table
SELECT master_create_distributed_table('remote_engagements', 'id', 'hash');
-- create a single shard on the first worker
SELECT master_create_worker_shards('remote_engagements', 1, 2);
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('remote_engagements', 'id', 'hash');
-- get the newshardid
SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remote_engagements'::regclass

View File

@ -167,11 +167,11 @@ FUNCTION 1 test_udt_hash(test_udt);
\c - - - :master_port
-- Distribute and populate the two tables.
SELECT master_create_distributed_table('repartition_udt', 'pk', 'hash');
SELECT master_create_worker_shards('repartition_udt', 3, 1);
SELECT master_create_distributed_table('repartition_udt_other', 'pk', 'hash');
SELECT master_create_worker_shards('repartition_udt_other', 5, 1);
SET citus.shard_count TO 3;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('repartition_udt', 'pk', 'hash');
SET citus.shard_count TO 5;
SELECT create_distributed_table('repartition_udt_other', 'pk', 'hash');
INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo');
INSERT INTO repartition_udt values (2, '(1,2)'::test_udt, 'foo');

View File

@ -11,8 +11,7 @@ CREATE TABLE multi_shard_modify_test (
t_key integer not null,
t_name varchar(25) not null,
t_value integer not null);
SELECT master_create_distributed_table('multi_shard_modify_test', 't_key', 'hash');
SELECT master_create_worker_shards('multi_shard_modify_test', 4, 2);
SELECT create_distributed_table('multi_shard_modify_test', 't_key', 'hash');
COPY multi_shard_modify_test (t_key, t_name, t_value) FROM STDIN WITH (FORMAT 'csv');
1,san francisco,99
@ -74,8 +73,7 @@ CREATE TABLE temp_nations(name text, key integer);
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' ');
-- commands with a USING clause are unsupported
SELECT master_create_distributed_table('temp_nations', 'name', 'hash');
SELECT master_create_worker_shards('temp_nations', 4, 2);
SELECT create_distributed_table('temp_nations', 'name', 'hash');
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' ');
-- commands with a RETURNING clause are unsupported

View File

@ -62,8 +62,8 @@ CREATE TABLE temp_table (
key int,
value int
);
SELECT master_create_distributed_table('temp_table','key','hash');
SELECT master_create_worker_shards('temp_table',4,1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('temp_table','key','hash');
CREATE FUNCTION no_parameter_insert_sql() RETURNS void AS $$
INSERT INTO temp_table (key) VALUES (0);

View File

@ -557,9 +557,8 @@ CREATE TABLE subquery_pruning_varchar_test_table
a varchar,
b int
);
SELECT master_create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash');
SELECT master_create_worker_shards('subquery_pruning_varchar_test_table', 4, 1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash');
-- temporarily disable router executor to test pruning behaviour of subquery pushdown
SET citus.enable_router_execution TO off;

View File

@ -114,9 +114,8 @@ CREATE TABLE researchers (
lab_id int NOT NULL,
name text NOT NULL
);
SELECT master_create_distributed_table('researchers', 'lab_id', 'hash');
SELECT master_create_worker_shards('researchers', 2, 2);
SET citus.shard_count TO 2;
SELECT create_distributed_table('researchers', 'lab_id', 'hash');
-- Basic rollback and release
BEGIN;

View File

@ -8,7 +8,7 @@ SET citus.next_shard_id TO 870000;
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
-- verify that the citus extension can't be dropped while distributed tables exist
DROP EXTENSION citus;
@ -28,7 +28,7 @@ COMMIT;
-- recreate testtableddl
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
-- verify that the table can be dropped
DROP TABLE testtableddl;
@ -38,7 +38,7 @@ CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-- create table and do create empty shard test here, too
SET citus.shard_replication_factor TO 1;
SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
SELECT 1 FROM master_create_empty_shard('testtableddl');
-- now actually drop table and shards
@ -61,8 +61,10 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-- create a table with a SERIAL column
CREATE TABLE testserialtable(id serial, group_id integer);
SELECT master_create_distributed_table('testserialtable', 'group_id', 'hash');
SELECT master_create_worker_shards('testserialtable', 2, 1);
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('testserialtable', 'group_id', 'hash');
-- should not be able to add additional serial columns
ALTER TABLE testserialtable ADD COLUMN other_id serial;

View File

@ -19,7 +19,7 @@ SET citus.explain_distributed_queries TO off;
-- and check that tasks are assigned to worker nodes as expected.
CREATE TABLE task_assignment_test_table (test_id integer);
SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append');
SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append');
-- Create logical shards with shardids 200, 201, and 202

View File

@ -10,7 +10,7 @@ SET citus.next_shard_id TO 1210000;
-- expect all shards to be dropped
--
CREATE TABLE test_truncate_append(a int);
SELECT master_create_distributed_table('test_truncate_append', 'a', 'append');
SELECT create_distributed_table('test_truncate_append', 'a', 'append');
-- verify no error is thrown when no shards are present
TRUNCATE TABLE test_truncate_append;
@ -50,7 +50,7 @@ DROP TABLE test_truncate_append;
-- expect shard to be present, data to be truncated
--
CREATE TABLE test_truncate_range(a int);
SELECT master_create_distributed_table('test_truncate_range', 'a', 'range');
SELECT create_distributed_table('test_truncate_range', 'a', 'range');
-- verify no error is thrown when no shards are present
TRUNCATE TABLE test_truncate_range;
@ -145,9 +145,9 @@ SELECT count(*) FROM test_truncate_hash;
DROP TABLE test_truncate_hash;
-- test with table with spaces in it
SET citus.shard_replication_factor TO 1;
CREATE TABLE "a b hash" (a int, b int);
SELECT master_create_distributed_table('"a b hash"', 'a', 'hash');
SELECT master_create_worker_shards('"a b hash"', 4, 1);
SELECT create_distributed_table('"a b hash"', 'a', 'hash');
INSERT INTO "a b hash" values (1, 0);
SELECT * from "a b hash";
TRUNCATE TABLE "a b hash";
@ -157,7 +157,7 @@ DROP TABLE "a b hash";
-- now with append
CREATE TABLE "a b append" (a int, b int);
SELECT master_create_distributed_table('"a b append"', 'a', 'append');
SELECT create_distributed_table('"a b append"', 'a', 'append');
SELECT master_create_empty_shard('"a b append"') AS new_shard_id \gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500
WHERE shardid = :new_shard_id;

View File

@ -12,8 +12,7 @@ CREATE TABLE upsert_test
);
-- distribute the table and create shards
SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash');
SELECT master_create_worker_shards('upsert_test', '4', '2');
SELECT create_distributed_table('upsert_test', 'part_key', 'hash');
-- do a regular insert
INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1), (2, 2);
@ -116,8 +115,7 @@ CREATE TABLE upsert_test_2
);
-- distribute the table and create shards
SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash');
SELECT master_create_worker_shards('upsert_test_2', '4', '2');
SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash');
-- now show that Citus works with multiple columns as the PRIMARY KEY, including the partiton key
INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1);
@ -137,8 +135,7 @@ CREATE TABLE upsert_test_3
CREATE INDEX idx_ups_test ON upsert_test_3(part_key);
-- distribute the table and create shards
SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash');
SELECT master_create_worker_shards('upsert_test_3', '4', '2');
SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash');
-- since there are no unique indexes, error-out
INSERT INTO upsert_test_3 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_3.count + 1;
@ -151,8 +148,7 @@ CREATE TABLE upsert_test_4
);
-- distribute the table and create shards
SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash');
SELECT master_create_worker_shards('upsert_test_4', '4', '2');
SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash');
-- a single row insert
INSERT INTO upsert_test_4 VALUES (1, 0);
@ -169,9 +165,9 @@ INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET coun
SELECT * FROM upsert_test_4;
-- now test dropped columns
SET citus.shard_replication_factor TO 1;
CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float);
SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash');
SELECT master_create_worker_shards('dropcol_distributed', 4, 1);
SELECT create_distributed_table('dropcol_distributed', 'key', 'hash');
INSERT INTO dropcol_distributed AS dropcol (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key)
DO UPDATE SET keep1 = dropcol.keep1;

View File

@ -7,10 +7,11 @@ SELECT substring(:'server_version', '\d+')::int > 10 as version_above_ten;
-- ===================================================================
-- test utility statement functionality
-- ===================================================================
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
CREATE TABLE sharded_table ( name text, id bigint );
SELECT master_create_distributed_table('sharded_table', 'id', 'hash');
SELECT master_create_worker_shards('sharded_table', 2, 1);
SELECT create_distributed_table('sharded_table', 'id', 'hash');
-- COPY out is supported with distributed tables
COPY sharded_table TO STDOUT;
@ -97,9 +98,11 @@ DROP TABLE sharded_table;
-- VACUUM tests
-- create a table with a single shard (for convenience)
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
CREATE TABLE dustbunnies (id integer, name text, age integer);
SELECT master_create_distributed_table('dustbunnies', 'id', 'hash');
SELECT master_create_worker_shards('dustbunnies', 1, 2);
SELECT create_distributed_table('dustbunnies', 'id', 'hash');
-- add some data to the distributed table
\copy dustbunnies (id, name) from stdin with csv