diff --git a/src/test/regress/expected/multi_agg_approximate_distinct.out b/src/test/regress/expected/multi_agg_approximate_distinct.out index d448060ff..1a5fac9b4 100644 --- a/src/test/regress/expected/multi_agg_approximate_distinct.out +++ b/src/test/regress/expected/multi_agg_approximate_distinct.out @@ -117,6 +117,8 @@ SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM linei -- Check that approximate count(distinct) works on a table in a schema other than public -- create necessary objects +SET citus.next_shard_id TO 20000000; +SET citus.next_placement_id TO 20000000; CREATE SCHEMA test_count_distinct_schema; CREATE TABLE test_count_distinct_schema.nation_hash( n_nationkey integer not null, @@ -124,15 +126,9 @@ CREATE TABLE test_count_distinct_schema.nation_hash( n_regionkey integer not null, n_comment varchar(152) ); -SELECT master_create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_cache_invalidation.out b/src/test/regress/expected/multi_cache_invalidation.out index dc475ee88..f6bff6713 100644 --- a/src/test/regress/expected/multi_cache_invalidation.out +++ b/src/test/regress/expected/multi_cache_invalidation.out @@ -1,9 +1,11 @@ SET citus.next_shard_id TO 1601000; +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; CREATE TABLE tab9 (test_id integer NOT NULL, data int); CREATE TABLE tab10 (test_id integer NOT NULL, data int); -SELECT master_create_distributed_table('tab9', 'test_id', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('tab9', 'test_id', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -13,12 +15,6 @@ SELECT master_create_distributed_table('tab10', 'test_id', 'hash'); (1 row) -SELECT master_create_worker_shards('tab9', 1, 1); - master_create_worker_shards ------------------------------ - -(1 row) - TRUNCATE tab9; UPDATE pg_dist_shard SET logicalrelid = 'tab10'::regclass WHERE logicalrelid = 'tab9'::regclass; TRUNCATE tab10; diff --git a/src/test/regress/expected/multi_citus_tools.out b/src/test/regress/expected/multi_citus_tools.out index aecc10b5a..708dc4a79 100644 --- a/src/test/regress/expected/multi_citus_tools.out +++ b/src/test/regress/expected/multi_citus_tools.out @@ -375,16 +375,11 @@ SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') O (2 rows) -- make sure run_on_all_placements respects shardstate +SET citus.shard_count TO 5; CREATE TABLE check_placements (key int); -SELECT master_create_distributed_table('check_placements', 'key', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('check_placements', 5, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('check_placements', 'key', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -420,28 +415,17 @@ SELECT * FROM run_command_on_placements('check_placements', 'select 1'); DROP TABLE check_placements CASCADE; -- make sure
run_on_all_colocated_placements correctly detects colocation CREATE TABLE check_colocated (key int); -SELECT master_create_distributed_table('check_colocated', 'key', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('check_colocated', 5, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('check_colocated', 'key', 'hash'); + create_distributed_table +-------------------------- (1 row) CREATE TABLE second_table (key int); -SELECT master_create_distributed_table('second_table', 'key', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('second_table', 4, 2); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 4; +SELECT create_distributed_table('second_table', 'key', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -526,16 +510,11 @@ SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_tab DROP TABLE check_colocated CASCADE; DROP TABLE second_table CASCADE; -- runs on all shards +SET citus.shard_count TO 5; CREATE TABLE check_shards (key int); -SELECT master_create_distributed_table('check_shards', 'key', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('check_shards', 5, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('check_shards', 'key', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index 71dc74a96..ce291cb99 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -76,6 +76,8 @@ SELECT master_get_active_worker_nodes(); (1 row) -- add some shard placements to the cluster +SET citus.shard_count TO 16; +SET citus.shard_replication_factor TO 1; SELECT isactive FROM master_activate_node('localhost', :worker_2_port); isactive ---------- @@ -83,15 +85,9 @@ SELECT isactive FROM master_activate_node('localhost', :worker_2_port); (1 row) CREATE TABLE cluster_management_test (col_1 text, col_2 int); -SELECT master_create_distributed_table('cluster_management_test', 'col_1', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('cluster_management_test', 16, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -373,6 +369,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); (1 row) -- check that a distributed table can be created after adding a node in a transaction +SET citus.shard_count TO 4; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- diff --git a/src/test/regress/expected/multi_create_insert_proxy.out b/src/test/regress/expected/multi_create_insert_proxy.out index 36cbf737c..3e2f4d00b 100644 --- a/src/test/regress/expected/multi_create_insert_proxy.out +++ b/src/test/regress/expected/multi_create_insert_proxy.out @@ -45,15 +45,11 @@ CREATE TABLE insert_target ( ); -- squelch WARNINGs that contain worker_port SET client_min_messages TO ERROR; -SELECT 
master_create_distributed_table('insert_target', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('insert_target', 2, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('insert_target', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_create_shards.out b/src/test/regress/expected/multi_create_shards.out index 377c784c9..fa6b7a179 100644 --- a/src/test/regress/expected/multi_create_shards.out +++ b/src/test/regress/expected/multi_create_shards.out @@ -41,28 +41,28 @@ CREATE TABLE table_to_distribute ( ); -- use the table WITH (OIDS) set ALTER TABLE table_to_distribute SET WITH OIDS; -SELECT master_create_distributed_table('table_to_distribute', 'id', 'hash'); +SELECT create_distributed_table('table_to_distribute', 'id', 'hash'); ERROR: cannot distribute relation: table_to_distribute DETAIL: Distributed relations must not specify the WITH (OIDS) option in their definitions. -- revert WITH (OIDS) from above ALTER TABLE table_to_distribute SET WITHOUT OIDS; -- use an index instead of table name -SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash'); +SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash'); ERROR: table_to_distribute_pkey is not a regular, foreign or partitioned table -- use a bad column name -SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash'); +SELECT create_distributed_table('table_to_distribute', 'bad_column', 'hash'); ERROR: column "bad_column" of relation "table_to_distribute" does not exist -- use unrecognized partition type -SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized'); +SELECT create_distributed_table('table_to_distribute', 'name', 'unrecognized'); ERROR: invalid input value for enum citus.distribution_type: "unrecognized" LINE 1: ..._distributed_table('table_to_distribute', 'name', 'unrecogni... ^ -- use a partition column of a type lacking any default operator class -SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash'); +SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash'); ERROR: data type json has no default operator class for specified partition method DETAIL: Partition column types must have a default operator class defined. -- use a partition column of type lacking the required support function (hash) -SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash'); +SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash'); ERROR: could not identify a hash function for type dummy_type DETAIL: Partition column types must have a hash function defined to use hash partitioning. 
-- distribute table and inspect side effects @@ -162,16 +162,12 @@ CREATE FOREIGN TABLE foreign_table_to_distribute id bigint ) SERVER fake_fdw_server; -SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1); +SET citus.shard_count TO 16; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - master_create_worker_shards ------------------------------ + create_distributed_table +-------------------------- (1 row) @@ -204,15 +200,10 @@ CREATE TABLE weird_shard_count name text, id bigint ); -SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('weird_shard_count', 7, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 7; +SELECT create_distributed_table('weird_shard_count', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index fd6e74ca9..fc3ac69b4 100644 --- a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -25,12 +25,12 @@ CREATE TABLE lineitem ( l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); -SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append'); +SELECT create_distributed_table('lineitem', 'l_orderkey', 'append'); WARNING: table "lineitem" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. - master_create_distributed_table ---------------------------------- + create_distributed_table +-------------------------- (1 row) @@ -46,12 +46,12 @@ CREATE TABLE orders ( o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); -SELECT master_create_distributed_table('orders', 'o_orderkey', 'append'); +SELECT create_distributed_table('orders', 'o_orderkey', 'append'); WARNING: table "orders" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. 
- master_create_distributed_table ---------------------------------- + create_distributed_table +-------------------------- (1 row) @@ -96,9 +96,9 @@ CREATE TABLE customer_append ( c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); -SELECT master_create_distributed_table('customer_append', 'c_custkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('customer_append', 'c_custkey', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -139,9 +139,9 @@ CREATE TABLE part_append ( p_container char(10) not null, p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); -SELECT master_create_distributed_table('part_append', 'p_partkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('part_append', 'p_partkey', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -173,9 +173,9 @@ CREATE TABLE supplier_single_shard s_acctbal decimal(15,2) not null, s_comment varchar(101) not null ); -SELECT master_create_distributed_table('supplier_single_shard', 's_suppkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('supplier_single_shard', 's_suppkey', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_create_table_constraints.out b/src/test/regress/expected/multi_create_table_constraints.out index 9b3bab25d..364be789c 100644 --- a/src/test/regress/expected/multi_create_table_constraints.out +++ b/src/test/regress/expected/multi_create_table_constraints.out @@ -8,12 +8,12 @@ CREATE TABLE uniq_cns_append_tables partition_col integer UNIQUE, other_col integer ); -SELECT master_create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append'); +SELECT create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append'); WARNING: table "uniq_cns_append_tables" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. - master_create_distributed_table ---------------------------------- + create_distributed_table +-------------------------- (1 row) @@ -23,12 +23,12 @@ CREATE TABLE excl_cns_append_tables other_col integer, EXCLUDE (partition_col WITH =) ); -SELECT master_create_distributed_table('excl_cns_append_tables', 'partition_col', 'append'); +SELECT create_distributed_table('excl_cns_append_tables', 'partition_col', 'append'); WARNING: table "excl_cns_append_tables" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. - master_create_distributed_table ---------------------------------- + create_distributed_table +-------------------------- (1 row) @@ -39,7 +39,7 @@ CREATE TABLE pk_on_non_part_col partition_col integer, other_col integer PRIMARY KEY ); -SELECT master_create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash'); +SELECT create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash'); ERROR: cannot create constraint on "pk_on_non_part_col" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). 
CREATE TABLE uq_on_non_part_col @@ -47,7 +47,7 @@ CREATE TABLE uq_on_non_part_col partition_col integer, other_col integer UNIQUE ); -SELECT master_create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash'); +SELECT create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash'); ERROR: cannot create constraint on "uq_on_non_part_col" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). CREATE TABLE ex_on_non_part_col @@ -56,7 +56,7 @@ CREATE TABLE ex_on_non_part_col other_col integer, EXCLUDE (other_col WITH =) ); -SELECT master_create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash'); +SELECT create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash'); ERROR: cannot create constraint on "ex_on_non_part_col" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). -- now show that Citus can distribute unique and EXCLUDE constraints that @@ -69,9 +69,9 @@ CREATE TABLE pk_on_part_col partition_col integer PRIMARY KEY, other_col integer ); -SELECT master_create_distributed_table('pk_on_part_col', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('pk_on_part_col', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -80,9 +80,9 @@ CREATE TABLE uq_part_col partition_col integer UNIQUE, other_col integer ); -SELECT master_create_distributed_table('uq_part_col', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('uq_part_col', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -92,21 +92,15 @@ CREATE TABLE uq_two_columns other_col integer, UNIQUE (partition_col, other_col) ); -SELECT master_create_distributed_table('uq_two_columns', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('uq_two_columns', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('uq_two_columns', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); -ERROR: duplicate key value violates unique constraint "uq_two_columns_partition_col_other_col_key_365000" +ERROR: duplicate key value violates unique constraint "uq_two_columns_partition_col_other_col_key_365008" DETAIL: Key (partition_col, other_col)=(1, 1) already exists. 
CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_on_part_col @@ -115,44 +109,32 @@ CREATE TABLE ex_on_part_col other_col integer, EXCLUDE (partition_col WITH =) ); -SELECT master_create_distributed_table('ex_on_part_col', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('ex_on_part_col', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('ex_on_part_col', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,2); -ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_partition_col_excl_365004" +ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_partition_col_excl_365012" DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:57638 CREATE TABLE ex_on_two_columns ( partition_col integer, other_col integer, EXCLUDE (partition_col WITH =, other_col WITH =) ); -SELECT master_create_distributed_table('ex_on_two_columns', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('ex_on_two_columns', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('ex_on_two_columns', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); -ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_partition_col_other_col_excl_365008" +ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_partition_col_other_col_excl_365016" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). 
CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_on_two_columns_prt @@ -161,15 +143,9 @@ CREATE TABLE ex_on_two_columns_prt other_col integer, EXCLUDE (partition_col WITH =, other_col WITH =) WHERE (other_col > 100) ); -SELECT master_create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('ex_on_two_columns_prt', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -177,16 +153,16 @@ INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); -ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_prt_partition_col_other_col_excl_365012" +ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_prt_partition_col_other_col_excl_365020" DETAIL: Key (partition_col, other_col)=(1, 101) conflicts with existing key (partition_col, other_col)=(1, 101). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:57638 CREATE TABLE ex_wrong_operator ( partition_col tsrange, other_col tsrange, EXCLUDE USING gist (other_col WITH =, partition_col WITH &&) ); -SELECT master_create_distributed_table('ex_wrong_operator', 'partition_col', 'hash'); +SELECT create_distributed_table('ex_wrong_operator', 'partition_col', 'hash'); ERROR: cannot create constraint on "ex_wrong_operator" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). CREATE TABLE ex_overlaps @@ -195,21 +171,15 @@ CREATE TABLE ex_overlaps other_col tsrange, EXCLUDE USING gist (other_col WITH &&, partition_col WITH =) ); -SELECT master_create_distributed_table('ex_overlaps', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('ex_overlaps', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('ex_overlaps', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]'); INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); -ERROR: conflicting key value violates exclusion constraint "ex_overlaps_other_col_partition_col_excl_365019" +ERROR: conflicting key value violates exclusion constraint "ex_overlaps_other_col_partition_col_excl_365027" DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]). 
CONTEXT: while executing command on localhost:57638 -- now show that Citus can distribute unique and EXCLUDE constraints that @@ -222,9 +192,9 @@ CREATE TABLE pk_on_part_col_named partition_col integer CONSTRAINT pk_on_part_col_named_pk PRIMARY KEY, other_col integer ); -SELECT master_create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -233,9 +203,9 @@ CREATE TABLE uq_part_col_named partition_col integer CONSTRAINT uq_part_col_named_uniq UNIQUE, other_col integer ); -SELECT master_create_distributed_table('uq_part_col_named', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('uq_part_col_named', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -245,21 +215,15 @@ CREATE TABLE uq_two_columns_named other_col integer, CONSTRAINT uq_two_columns_named_uniq UNIQUE (partition_col, other_col) ); -SELECT master_create_distributed_table('uq_two_columns_named', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('uq_two_columns_named', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('uq_two_columns_named', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); -ERROR: duplicate key value violates unique constraint "uq_two_columns_named_uniq_365020" +ERROR: duplicate key value violates unique constraint "uq_two_columns_named_uniq_365036" DETAIL: Key (partition_col, other_col)=(1, 1) already exists. CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_on_part_col_named @@ -268,44 +232,32 @@ CREATE TABLE ex_on_part_col_named other_col integer, CONSTRAINT ex_on_part_col_named_exclude EXCLUDE (partition_col WITH =) ); -SELECT master_create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('ex_on_part_col_named', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,2); -ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_named_exclude_365024" +ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_named_exclude_365040" DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1). 
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:57638 CREATE TABLE ex_on_two_columns_named ( partition_col integer, other_col integer, CONSTRAINT ex_on_two_columns_named_exclude EXCLUDE (partition_col WITH =, other_col WITH =) ); -SELECT master_create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('ex_on_two_columns_named', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); -ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_named_exclude_365028" +ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_named_exclude_365044" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_multiple_excludes @@ -316,34 +268,28 @@ CREATE TABLE ex_multiple_excludes CONSTRAINT ex_multiple_excludes_excl1 EXCLUDE (partition_col WITH =, other_col WITH =), CONSTRAINT ex_multiple_excludes_excl2 EXCLUDE (partition_col WITH =, other_other_col WITH =) ); -SELECT master_create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('ex_multiple_excludes', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,1); INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,2); -ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl1_365032" +ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl1_365048" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:57638 INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,2,1); -ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl2_365032" +ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl2_365048" DETAIL: Key (partition_col, other_other_col)=(1, 1) conflicts with existing key (partition_col, other_other_col)=(1, 1). 
-CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:57638 CREATE TABLE ex_wrong_operator_named ( partition_col tsrange, other_col tsrange, CONSTRAINT ex_wrong_operator_named_exclude EXCLUDE USING gist (other_col WITH =, partition_col WITH &&) ); -SELECT master_create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash'); +SELECT create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash'); ERROR: cannot create constraint on "ex_wrong_operator_named" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). CREATE TABLE ex_overlaps_named @@ -352,32 +298,26 @@ CREATE TABLE ex_overlaps_named other_col tsrange, CONSTRAINT ex_overlaps_operator_named_exclude EXCLUDE USING gist (other_col WITH &&, partition_col WITH =) ); -SELECT master_create_distributed_table('ex_overlaps_named', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('ex_overlaps_named', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('ex_overlaps_named', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]'); INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); -ERROR: conflicting key value violates exclusion constraint "ex_overlaps_operator_named_exclude_365039" +ERROR: conflicting key value violates exclusion constraint "ex_overlaps_operator_named_exclude_365055" DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]). -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:57637 -- now show that Citus allows unique constraints on range-partitioned tables. 
CREATE TABLE uq_range_tables ( partition_col integer UNIQUE, other_col integer ); -SELECT master_create_distributed_table('uq_range_tables', 'partition_col', 'range'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('uq_range_tables', 'partition_col', 'range'); + create_distributed_table +-------------------------- (1 row) @@ -388,27 +328,21 @@ CREATE TABLE check_example ( other_col integer CHECK (other_col >= 100), other_other_col integer CHECK (abs(other_other_col) >= 100) ); -SELECT master_create_distributed_table('check_example', 'partition_col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('check_example', '2', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('check_example', 'partition_col', 'hash'); + create_distributed_table +-------------------------- (1 row) \c - - - :worker_1_port -\d check_example_partition_col_key_365040 -Index "public.check_example_partition_col_key_365040" +\d check_example_partition_col_key_365056 +Index "public.check_example_partition_col_key_365056" Column | Type | Definition ---------------+---------+--------------- partition_col | integer | partition_col -unique, btree, for table "public.check_example_365040" +unique, btree, for table "public.check_example_365056" -SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365040'::regclass; +SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365056'::regclass; Constraint | Definition -------------------------------------+------------------------------------- check_example_other_col_check | CHECK (other_col >= 100) diff --git a/src/test/regress/expected/multi_cross_shard.out b/src/test/regress/expected/multi_cross_shard.out index d015a8104..ed2a0e4e7 100644 --- a/src/test/regress/expected/multi_cross_shard.out +++ b/src/test/regress/expected/multi_cross_shard.out @@ -25,24 +25,24 @@ SHOW citus.multi_task_query_log_level; off (1 row) -SELECT * FROM multi_task_table; +SELECT * FROM multi_task_table ORDER BY 1; id | name ----+-------- 1 | elem_1 - 3 | elem_3 2 | elem_2 + 3 | elem_3 (3 rows) -- Get messages with the log level 'notice' SET citus.multi_task_query_log_level TO notice; -SELECT * FROM multi_task_table; +SELECT * FROM multi_task_table ORDER BY 1; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. id | name ----+-------- 1 | elem_1 - 3 | elem_3 2 | elem_2 + 3 | elem_3 (3 rows) SELECT AVG(id) AS avg_id FROM multi_task_table; @@ -101,13 +101,13 @@ HINT: Queries are split to multiple tasks if they have to be split into several INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table WHERE id = 1 GROUP BY id; -- Should have four rows (three rows from the query without a WHERE clause and one from the query with it) SET citus.multi_task_query_log_level to DEFAULT; -SELECT * FROM summary_table; +SELECT * FROM summary_table ORDER BY 1,2; id | order_sum ----+----------- 1 | 35 1 | 35 - 3 | 50 2 | 40 + 3 | 50 (4 rows) -- Set log-level to different levels inside the transaction @@ -125,13 +125,13 @@ HINT: Queries are split to multiple tasks if they have to be split into several ROLLBACK; -- Should have only four rows since the transaction is rolled back.
SET citus.multi_task_query_log_level to DEFAULT; -SELECT * FROM summary_table; +SELECT * FROM summary_table ORDER BY 1,2; id | order_sum ----+----------- 1 | 35 1 | 35 - 3 | 50 2 | 40 + 3 | 50 (4 rows) -- Test router-select query diff --git a/src/test/regress/expected/multi_data_types.out b/src/test/regress/expected/multi_data_types.out index 9e16d6e11..c8e77b893 100644 --- a/src/test/regress/expected/multi_data_types.out +++ b/src/test/regress/expected/multi_data_types.out @@ -51,15 +51,10 @@ CREATE TABLE composite_type_partitioned_table ( id integer, col test_composite_type ); -SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -88,15 +83,9 @@ CREATE TABLE bugs ( id integer, status bug_status ); -SELECT master_create_distributed_table('bugs', 'status', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('bugs', 4, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('bugs', 'status', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -128,15 +117,9 @@ CREATE TABLE varchar_hash_partitioned_table ( id int, name varchar ); -SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_deparse_shard_query.out b/src/test/regress/expected/multi_deparse_shard_query.out index 01cce323d..7f2369914 100644 --- a/src/test/regress/expected/multi_deparse_shard_query.out +++ b/src/test/regress/expected/multi_deparse_shard_query.out @@ -2,6 +2,7 @@ -- MULTI_DEPARSE_SHARD_QUERY -- SET citus.next_shard_id TO 13100000; +SET citus.shard_replication_factor TO 1; CREATE FUNCTION deparse_shard_query_test(text) RETURNS VOID AS 'citus' @@ -18,15 +19,9 @@ CREATE TABLE raw_events_1 ( value_7 int, event_at date DEFAULT now() ); -SELECT master_create_distributed_table('raw_events_1', 'tenant_id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('raw_events_1', 4, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('raw_events_1', 'tenant_id', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -42,15 +37,9 @@ CREATE TABLE raw_events_2 ( value_7 int, event_at date DEFAULT now() ); -SELECT master_create_distributed_table('raw_events_2', 'tenant_id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('raw_events_2', 4, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('raw_events_2', 'tenant_id', 'hash'); + create_distributed_table
+-------------------------- (1 row) @@ -63,15 +52,9 @@ CREATE TABLE aggregated_events sum_value_5 float, average_value_6 int, rollup_hour date); -SELECT master_create_distributed_table('aggregated_events', 'tenant_id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('aggregated_events', 4, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('aggregated_events', 'tenant_id', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_distribution_metadata.out b/src/test/regress/expected/multi_distribution_metadata.out index 26823c48b..bc9ea521e 100644 --- a/src/test/regress/expected/multi_distribution_metadata.out +++ b/src/test/regress/expected/multi_distribution_metadata.out @@ -46,16 +46,9 @@ CREATE TABLE events_hash ( id bigint, name text ); -SELECT master_create_distributed_table('events_hash', 'name', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - --- create worker shards -SELECT master_create_worker_shards('events_hash', 4, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('events_hash', 'name', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_drop_extension.out b/src/test/regress/expected/multi_drop_extension.out index f44c8da14..384536c2e 100644 --- a/src/test/regress/expected/multi_drop_extension.out +++ b/src/test/regress/expected/multi_drop_extension.out @@ -4,9 +4,9 @@ -- Tests around dropping and recreating the extension SET citus.next_shard_id TO 550000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); -SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -33,9 +33,9 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- verify that a table can be created after the extension has been dropped and recreated CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); -SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index f8381e6df..30573cec8 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -3,15 +3,14 @@ -- -- Check that we can run CREATE INDEX and DROP INDEX statements on distributed -- tables. 
-SET citus.next_shard_id TO 640000; -- -- CREATE TEST TABLES -- SET citus.next_shard_id TO 102080; CREATE TABLE index_test_range(a int, b int, c int); -SELECT master_create_distributed_table('index_test_range', 'a', 'range'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('index_test_range', 'a', 'range'); + create_distributed_table +-------------------------- (1 row) @@ -27,23 +26,19 @@ SELECT master_create_empty_shard('index_test_range'); 102081 (1 row) +SET citus.shard_count TO 8; +SET citus.shard_replication_factor TO 2; CREATE TABLE index_test_hash(a int, b int, c int); -SELECT master_create_distributed_table('index_test_hash', 'a', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('index_test_hash', 8, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('index_test_hash', 'a', 'hash'); + create_distributed_table +-------------------------- (1 row) CREATE TABLE index_test_append(a int, b int, c int); -SELECT master_create_distributed_table('index_test_append', 'a', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('index_test_append', 'a', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_index_statements_0.out b/src/test/regress/expected/multi_index_statements_0.out index f27501833..5aade6198 100644 --- a/src/test/regress/expected/multi_index_statements_0.out +++ b/src/test/regress/expected/multi_index_statements_0.out @@ -3,15 +3,14 @@ -- -- Check that we can run CREATE INDEX and DROP INDEX statements on distributed -- tables. -SET citus.next_shard_id TO 640000; -- -- CREATE TEST TABLES -- SET citus.next_shard_id TO 102080; CREATE TABLE index_test_range(a int, b int, c int); -SELECT master_create_distributed_table('index_test_range', 'a', 'range'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('index_test_range', 'a', 'range'); + create_distributed_table +-------------------------- (1 row) @@ -27,23 +26,19 @@ SELECT master_create_empty_shard('index_test_range'); 102081 (1 row) +SET citus.shard_count TO 8; +SET citus.shard_replication_factor TO 2; CREATE TABLE index_test_hash(a int, b int, c int); -SELECT master_create_distributed_table('index_test_hash', 'a', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('index_test_hash', 8, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('index_test_hash', 'a', 'hash'); + create_distributed_table +-------------------------- (1 row) CREATE TABLE index_test_append(a int, b int, c int); -SELECT master_create_distributed_table('index_test_append', 'a', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('index_test_append', 'a', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index f4ceeb9fb..7e1955dcb 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -75,19 +75,19 @@ WHERE raw_events_first.user_id = raw_events_second.user_id; user_id --------- - 1 - 5 3 4 - 6 + 1 + 5 2 + 6 (6 rows) -- see that 
we get unique violations INSERT INTO raw_events_second SELECT * FROM raw_events_first; ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_13300004" DETAIL: Key (user_id, value_1)=(1, 10) already exists. -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:57638 -- stable functions should be allowed INSERT INTO raw_events_second (user_id, time) SELECT @@ -236,7 +236,7 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_13300007" DETAIL: Key (user_id, value_1)=(9, 90) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:57637 -- now do some aggregations INSERT INTO agg_events SELECT @@ -266,7 +266,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_t DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:57637 -- some subquery tests INSERT INTO agg_events (value_1_agg, @@ -287,7 +287,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_t DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:57637 -- subquery one more level deep INSERT INTO agg_events (value_4_agg, @@ -311,7 +311,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_t DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists.
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:57637 -- join between subqueries INSERT INTO agg_events (user_id) @@ -1722,7 +1722,7 @@ BEGIN; ALTER TABLE reference_table ADD COLUMN z int; INSERT INTO raw_events_first (user_id) SELECT user_id FROM raw_events_second JOIN reference_table USING (user_id); -ERROR: cannot establish a new connection for placement 13300024, since DDL has been executed on a connection that is in use +ERROR: cannot establish a new connection for placement 13300025, since DDL has been executed on a connection that is in use ROLLBACK; -- Insert after copy is allowed BEGIN; @@ -2210,14 +2210,14 @@ TRUNCATE raw_events_first; BEGIN; INSERT INTO raw_events_first (user_id, value_1) SELECT s, s FROM generate_series(1, 5) s; -SELECT user_id, value_1 FROM raw_events_first; +SELECT user_id, value_1 FROM raw_events_first ORDER BY 1; user_id | value_1 ---------+--------- 1 | 1 - 5 | 5 + 2 | 2 3 | 3 4 | 4 - 2 | 2 + 5 | 5 (5 rows) ROLLBACK; diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out index 7c20eeaba..a39cf8b65 100644 --- a/src/test/regress/expected/multi_modifications.out +++ b/src/test/regress/expected/multi_modifications.out @@ -20,52 +20,33 @@ CREATE TABLE multiple_hash ( CREATE TABLE insufficient_shards ( LIKE limit_orders ); CREATE TABLE range_partitioned ( LIKE limit_orders ); CREATE TABLE append_partitioned ( LIKE limit_orders ); -SELECT master_create_distributed_table('limit_orders', 'id', 'hash'); - master_create_distributed_table ---------------------------------- +SET citus.shard_count TO 2; +SELECT create_distributed_table('limit_orders', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) -SELECT master_create_distributed_table('multiple_hash', 'category', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('multiple_hash', 'id', 'hash'); +ERROR: column "id" of relation "multiple_hash" does not exist +SELECT create_distributed_table('range_partitioned', 'id', 'range'); + create_distributed_table +-------------------------- (1 row) -SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_distributed_table('range_partitioned', 'id', 'range'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_distributed_table('append_partitioned', 'id', 'append'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('limit_orders', 2, 2); - master_create_worker_shards ------------------------------ - -(1 row) - -SELECT master_create_worker_shards('multiple_hash', 2, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('append_partitioned', 'id', 'append'); + create_distributed_table +-------------------------- (1 row) +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; -- make a single shard that covers no partition values -SELECT master_create_worker_shards('insufficient_shards', 1, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('insufficient_shards', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -177,21 +158,21 @@ INSERT INTO limit_orders VALUES (18811, 'BUD', 14962, '2014-04-05 08:32:16', 'se 
-5.00); ERROR: new row for relation "limit_orders_750000" violates check constraint "limit_orders_limit_price_check" DETAIL: Failing row contains (18811, BUD, 14962, 2014-04-05 08:32:16, sell, -5.00). -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:57638 -- INSERT violating primary key constraint INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58); ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001" DETAIL: Key (id)=(32743) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:57637 -- INSERT violating primary key constraint, with RETURNING specified. INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING *; ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001" DETAIL: Key (id)=(32743) already exists. -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:57637 -- INSERT, with RETURNING specified, failing with a non-constraint error INSERT INTO limit_orders VALUES (34153, 'LEE', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING id / 0; ERROR: division by zero -CONTEXT: while executing command on localhost:57637 +CONTEXT: while executing command on localhost:57638 SET client_min_messages TO DEFAULT; -- commands with non-constant partition values are supported INSERT INTO limit_orders VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:45', @@ -301,7 +282,7 @@ DELETE FROM limit_orders WHERE id < 0; WITH new_orders AS (INSERT INTO limit_orders VALUES (412, 'FLO', 12, '2017-07-02 16:32:15', 'buy', 66)) DELETE FROM limit_orders RETURNING id / 0; ERROR: division by zero -CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:57637 SELECT * FROM limit_orders WHERE id = 412; id | symbol | bidder_id | placed_at | kind | limit_price ----+--------+-----------+-----------+------+------------- @@ -358,7 +339,7 @@ INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001" DETAIL: Key (id)=(275) already exists. 
-CONTEXT: while executing command on localhost:57638 +CONTEXT: while executing command on localhost:57637 -- Test that shards which miss a modification are marked unhealthy -- First: Connect to the second worker node \c - - - :worker_2_port @@ -437,10 +418,9 @@ UPDATE limit_orders SET limit_price = 0.00 FROM bidders limit_orders.bidder_id = bidders.id AND bidders.name = 'Bernie Madoff'; ERROR: relation bidders is not distributed --- the connection used for the INSERT is claimed by pull-push, causing the UPDATE to fail +-- should succeed with a CTE WITH deleted_orders AS (INSERT INTO limit_orders VALUES (399, 'PDR', 14, '2017-07-02 16:32:15', 'sell', 43)) UPDATE limit_orders SET symbol = 'GM'; -ERROR: cannot establish a new connection for placement 750003, since DML has been executed on a connection that is in use SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; symbol | bidder_id --------+----------- @@ -620,15 +600,10 @@ SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data; -- verify interaction of default values, SERIAL, and RETURNING \set QUIET on CREATE TABLE app_analytics_events (id serial, app_id integer, name text); -SELECT master_create_distributed_table('app_analytics_events', 'app_id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('app_analytics_events', 4, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 4; +SELECT create_distributed_table('app_analytics_events', 'app_id', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -784,7 +759,7 @@ SELECT * FROM app_analytics_events ORDER BY id; DROP TABLE app_analytics_events; -- Test multi-row insert with a dropped column before the partition column CREATE TABLE app_analytics_events (id int default 3, app_id integer, name text); -SELECT create_distributed_table('app_analytics_events', 'name'); +SELECT create_distributed_table('app_analytics_events', 'name', colocate_with => 'none'); create_distributed_table -------------------------- diff --git a/src/test/regress/expected/multi_modifications_0.out b/src/test/regress/expected/multi_modifications_0.out new file mode 100644 index 000000000..ca556dd1f --- /dev/null +++ b/src/test/regress/expected/multi_modifications_0.out @@ -0,0 +1,1277 @@ +SET citus.shard_count TO 32; +SET citus.next_shard_id TO 750000; +SET citus.next_placement_id TO 750000; +-- =================================================================== +-- test end-to-end modification functionality +-- =================================================================== +CREATE TYPE order_side AS ENUM ('buy', 'sell'); +CREATE TABLE limit_orders ( + id bigint PRIMARY KEY, + symbol text NOT NULL, + bidder_id bigint NOT NULL, + placed_at timestamp NOT NULL, + kind order_side NOT NULL, + limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) +); +CREATE TABLE multiple_hash ( + category text NOT NULL, + data text NOT NULL +); +CREATE TABLE insufficient_shards ( LIKE limit_orders ); +CREATE TABLE range_partitioned ( LIKE limit_orders ); +CREATE TABLE append_partitioned ( LIKE limit_orders ); +SET citus.shard_count TO 2; +SELECT create_distributed_table('limit_orders', 'id', 'hash'); + create_distributed_table +-------------------------- + +(1 row) + +SELECT create_distributed_table('multiple_hash', 'id', 'hash'); +ERROR: column "id" of relation "multiple_hash" does not exist +SELECT create_distributed_table('range_partitioned', 'id', 'range'); + 
create_distributed_table +-------------------------- + +(1 row) + +SELECT create_distributed_table('append_partitioned', 'id', 'append'); + create_distributed_table +-------------------------- + +(1 row) + +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +-- make a single shard that covers no partition values +SELECT create_distributed_table('insufficient_shards', 'id', 'hash'); + create_distributed_table +-------------------------- + +(1 row) + +UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0 +WHERE logicalrelid = 'insufficient_shards'::regclass; +-- create range-partitioned shards +SELECT master_create_empty_shard('range_partitioned') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 49999 +WHERE shardid = :new_shard_id; +SELECT master_create_empty_shard('range_partitioned') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 50000, shardmaxvalue = 99999 +WHERE shardid = :new_shard_id; +-- create append-partitioned shards +SELECT master_create_empty_shard('append_partitioned') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 500000 +WHERE shardid = :new_shard_id; +SELECT master_create_empty_shard('append_partitioned') AS new_shard_id +\gset +UPDATE pg_dist_shard SET shardminvalue = 500000, shardmaxvalue = 1000000 +WHERE shardid = :new_shard_id; +-- basic single-row INSERT +INSERT INTO limit_orders VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', + 20.69); +SELECT COUNT(*) FROM limit_orders WHERE id = 32743; + count +------- + 1 +(1 row) + +-- basic single-row INSERT with RETURNING +INSERT INTO limit_orders VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; + id | symbol | bidder_id | placed_at | kind | limit_price +-------+--------+-----------+--------------------------+------+------------- + 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 +(1 row) + +-- try a single-row INSERT with no shard to receive it +INSERT INTO insufficient_shards VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', + 20.69); +ERROR: cannot find shard interval +DETAIL: Hash of the partition column value does not fall into any shards. 
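The "cannot find shard interval" error above follows directly from the doctored catalog row: hash pruning looks for a shard whose [shardminvalue, shardmaxvalue] range covers the hashed partition value, and the single [0, 0] interval covers almost nothing. A quick way to see the mismatch is sketched below; this is illustrative only, not part of the expected output, and it assumes the worker_hash() helper UDF is available on the coordinator.

SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'insufficient_shards'::regclass;
-- hash the value the INSERT tried to route; anything outside [0, 0] has no home
SELECT worker_hash(32743::bigint);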
+-- try an insert to a range-partitioned table +INSERT INTO range_partitioned VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', + 20.69); +-- also insert to an append-partitioned table +INSERT INTO append_partitioned VALUES (414123, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', + 20.69); +-- ensure the values are where we put them and query to ensure they are properly pruned +SET client_min_messages TO 'DEBUG2'; +SET citus.task_executor_type TO 'real-time'; +SELECT * FROM range_partitioned WHERE id = 32743; +DEBUG: Creating router plan +DEBUG: Plan is router executable + id | symbol | bidder_id | placed_at | kind | limit_price +-------+--------+-----------+--------------------------+------+------------- + 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 +(1 row) + +SELECT * FROM append_partitioned WHERE id = 414123; +DEBUG: Plan is router executable + id | symbol | bidder_id | placed_at | kind | limit_price +--------+--------+-----------+--------------------------+------+------------- + 414123 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 +(1 row) + +SET client_min_messages TO DEFAULT; +SET citus.task_executor_type TO DEFAULT; +-- try inserting without a range-partitioned shard to receive the value +INSERT INTO range_partitioned VALUES (999999, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', + 20.69); +ERROR: cannot run INSERT command which targets no shards +HINT: Make sure you have created a shard which can receive this partition column value. +-- and insert into an append-partitioned table with a value that spans shards: +INSERT INTO append_partitioned VALUES (500000, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', + 20.69); +ERROR: cannot run INSERT command which targets multiple shards +HINT: Make sure the value for partition column "id" falls into a single shard. +-- INSERT with DEFAULT in the target list +INSERT INTO limit_orders VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell', + DEFAULT); +SELECT COUNT(*) FROM limit_orders WHERE id = 12756; + count +------- + 1 +(1 row) + +-- INSERT with expressions in target list +INSERT INTO limit_orders VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' + + interval '5 hours', 'buy', sqrt(2)); +SELECT COUNT(*) FROM limit_orders WHERE id = 430; + count +------- + 1 +(1 row) + +-- INSERT without partition key +INSERT INTO limit_orders DEFAULT VALUES; +ERROR: cannot perform an INSERT without a partition column value +-- squelch WARNINGs that contain worker_port +SET client_min_messages TO ERROR; +-- INSERT violating NOT NULL constraint +INSERT INTO limit_orders VALUES (NULL, 'T', 975234, DEFAULT); +ERROR: cannot perform an INSERT with NULL in the partition column +-- INSERT violating column constraint +INSERT INTO limit_orders VALUES (18811, 'BUD', 14962, '2014-04-05 08:32:16', 'sell', + -5.00); +ERROR: new row for relation "limit_orders_750000" violates check constraint "limit_orders_limit_price_check" +DETAIL: Failing row contains (18811, BUD, 14962, 2014-04-05 08:32:16, sell, -5.00). +CONTEXT: while executing command on localhost:57637 +-- INSERT violating primary key constraint +INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58); +ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001" +DETAIL: Key (id)=(32743) already exists. +CONTEXT: while executing command on localhost:57638 +-- INSERT violating primary key constraint, with RETURNING specified. 
+INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING *;
+ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001"
+DETAIL: Key (id)=(32743) already exists.
+CONTEXT: while executing command on localhost:57638
+-- INSERT, with RETURNING specified, failing with a non-constraint error
+INSERT INTO limit_orders VALUES (34153, 'LEE', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING id / 0;
+ERROR: division by zero
+CONTEXT: while executing command on localhost:57637
+SET client_min_messages TO DEFAULT;
+-- commands with non-constant partition values are supported
+INSERT INTO limit_orders VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:45',
+                                 'sell', 0.58);
+-- values for other columns are totally fine
+INSERT INTO limit_orders VALUES (2036, 'GOOG', 5634, now(), 'buy', random());
+-- commands with mutable functions in their quals
+DELETE FROM limit_orders WHERE id = 246 AND bidder_id = (random() * 1000);
+ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE
+-- commands with mutable but non-volatile functions (i.e., stable functions) in their quals
+-- (the cast to timestamp is because the timestamp_eq_timestamptz operator is stable)
+DELETE FROM limit_orders WHERE id = 246 AND placed_at = current_timestamp::timestamp;
+-- multi-row inserts are supported
+INSERT INTO limit_orders VALUES (12037, 'GOOG', 5634, '2001-04-16 03:37:28', 'buy', 0.50),
+                                (12038, 'GOOG', 5634, '2001-04-17 03:37:28', 'buy', 2.50),
+                                (12039, 'GOOG', 5634, '2001-04-18 03:37:28', 'buy', 1.50);
+SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 12037 AND 12039;
+ count
+-------
+     3
+(1 row)
+
+-- even those with functions and returning
+INSERT INTO limit_orders VALUES (22037, 'GOOG', 5634, now(), 'buy', 0.50),
+                                (22038, 'GOOG', 5634, now(), 'buy', 2.50),
+                                (22039, 'GOOG', 5634, now(), 'buy', 1.50)
+RETURNING id;
+  id
+-------
+ 22038
+ 22039
+ 22037
+(3 rows)
+
+SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 22037 AND 22039;
+ count
+-------
+     3
+(1 row)
+
+-- even those with functions in their partition columns
+INSERT INTO limit_orders VALUES (random() * 10 + 70000, 'GOOG', 5634, now(), 'buy', 0.50),
+                                (random() * 10 + 80000, 'GOOG', 5634, now(), 'buy', 2.50),
+                                (random() * 10 + 80090, 'GOOG', 5634, now(), 'buy', 1.50);
+SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 70000 AND 90000;
+ count
+-------
+     3
+(1 row)
+
+-- commands containing a CTE are supported
+WITH deleted_orders AS (DELETE FROM limit_orders WHERE id < 0 RETURNING *)
+INSERT INTO limit_orders SELECT * FROM deleted_orders;
+-- test simple DELETE
+INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69);
+SELECT COUNT(*) FROM limit_orders WHERE id = 246;
+ count
+-------
+     1
+(1 row)
+
+DELETE FROM limit_orders WHERE id = 246;
+SELECT COUNT(*) FROM limit_orders WHERE id = 246;
+ count
+-------
+     0
+(1 row)
+
+-- test simple DELETE with RETURNING
+DELETE FROM limit_orders WHERE id = 430 RETURNING *;
+ id  | symbol | bidder_id |        placed_at         | kind |   limit_price
+-----+--------+-----------+--------------------------+------+-----------------
+ 430 | IBM    |       214 | Tue Jan 28 15:31:17 2003 | buy  | 1.4142135623731
+(1 row)
+
+SELECT COUNT(*) FROM limit_orders WHERE id = 430;
+ count
+-------
+     0
+(1 row)
+
+-- DELETE with expression in WHERE clause
+INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69);
+SELECT COUNT(*) FROM limit_orders WHERE id = 246;
+ count
+-------
+     1
+(1 row)
+
+DELETE FROM limit_orders WHERE id = (2 * 123);
+SELECT COUNT(*) FROM limit_orders WHERE id = 246;
+ count
+-------
+     0
+(1 row)
+
+-- commands with a USING clause are unsupported
+CREATE TABLE bidders ( name text, id bigint );
+DELETE FROM limit_orders USING bidders WHERE limit_orders.id = 246 AND
+                                             limit_orders.bidder_id = bidders.id AND
+                                             bidders.name = 'Bernie Madoff';
+ERROR: relation bidders is not distributed
+-- commands containing a CTE are supported
+WITH new_orders AS (INSERT INTO limit_orders VALUES (411, 'FLO', 12, '2017-07-02 16:32:15', 'buy', 66))
+DELETE FROM limit_orders WHERE id < 0;
+-- we have to be careful that modifying CTEs are part of the transaction and can thus roll back
+WITH new_orders AS (INSERT INTO limit_orders VALUES (412, 'FLO', 12, '2017-07-02 16:32:15', 'buy', 66))
+DELETE FROM limit_orders RETURNING id / 0;
+ERROR: division by zero
+CONTEXT: while executing command on localhost:57638
+SELECT * FROM limit_orders WHERE id = 412;
+ id | symbol | bidder_id | placed_at | kind | limit_price
+----+--------+-----------+-----------+------+-------------
+(0 rows)
+
+INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69);
+-- simple UPDATE
+UPDATE limit_orders SET symbol = 'GM' WHERE id = 246;
+SELECT symbol FROM limit_orders WHERE id = 246;
+ symbol
+--------
+ GM
+(1 row)
+
+-- simple UPDATE with RETURNING
+UPDATE limit_orders SET symbol = 'GM' WHERE id = 246 RETURNING *;
+ id  | symbol | bidder_id |        placed_at         | kind | limit_price
+-----+--------+-----------+--------------------------+------+-------------
+ 246 | GM     |       162 | Mon Jul 02 16:32:15 2007 | sell |       20.69
+(1 row)
+
+-- expression UPDATE
+UPDATE limit_orders SET bidder_id = 6 * 3 WHERE id = 246;
+SELECT bidder_id FROM limit_orders WHERE id = 246;
+ bidder_id
+-----------
+        18
+(1 row)
+
+-- expression UPDATE with RETURNING
+UPDATE limit_orders SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *;
+ id  | symbol | bidder_id |        placed_at         | kind | limit_price
+-----+--------+-----------+--------------------------+------+-------------
+ 246 | GM     |        30 | Mon Jul 02 16:32:15 2007 | sell |       20.69
+(1 row)
+
+-- multi-column UPDATE
+UPDATE limit_orders SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246;
+SELECT kind, limit_price FROM limit_orders WHERE id = 246;
+ kind | limit_price
+------+-------------
+ buy  |        0.00
+(1 row)
+
+-- multi-column UPDATE with RETURNING
+UPDATE limit_orders SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *;
+ id  | symbol | bidder_id |        placed_at         | kind | limit_price
+-----+--------+-----------+--------------------------+------+-------------
+ 246 | GM     |        30 | Mon Jul 02 16:32:15 2007 | buy  |         999
+(1 row)
+
+-- Test that on unique constraint violations, we fail fast
+INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
+INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
+ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001"
+DETAIL: Key (id)=(275) already exists.
+CONTEXT: while executing command on localhost:57638
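The next blocks rename shard relations directly on the workers, so it helps to recall the naming scheme: a placement's relation is named <table>_<shardid>, with shard ids drawn from the citus.next_shard_id value set at the top of this file. An illustrative sketch (not part of the expected output) using the shard_name() UDF exercised elsewhere in this patch:

SELECT shard_name('limit_orders'::regclass, 750000);
     shard_name
---------------------
 limit_orders_750000
(1 row)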
+-- Test that shards which miss a modification are marked unhealthy
+-- First: Connect to the second worker node
+\c - - - :worker_2_port
+-- Second: Move aside limit_orders shard on the second worker node
+ALTER TABLE limit_orders_750000 RENAME TO renamed_orders;
+-- Third: Connect back to master node
+\c - - - :master_port
+-- Fourth: Perform an INSERT on the remaining node
+INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
+WARNING: relation "public.limit_orders_750000" does not exist
+CONTEXT: while executing command on localhost:57638
+-- Last: Verify the insert worked but the missing placement is now unhealthy
+SELECT count(*) FROM limit_orders WHERE id = 276;
+ count
+-------
+     1
+(1 row)
+
+SELECT count(*)
+FROM pg_dist_shard_placement AS sp,
+     pg_dist_shard AS s
+WHERE sp.shardid = s.shardid
+AND sp.nodename = 'localhost'
+AND sp.nodeport = :worker_2_port
+AND sp.shardstate = 3
+AND s.logicalrelid = 'limit_orders'::regclass;
+ count
+-------
+     1
+(1 row)
+
+-- Test that if all shards miss a modification, no state change occurs
+-- First: Connect to the first worker node
+\c - - - :worker_1_port
+-- Second: Move aside limit_orders shard on the first worker node
+ALTER TABLE limit_orders_750000 RENAME TO renamed_orders;
+-- Third: Connect back to master node
+\c - - - :master_port
+-- Fourth: Attempt an INSERT, which fails on the only remaining placement
+INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
+ERROR: relation "public.limit_orders_750000" does not exist
+CONTEXT: while executing command on localhost:57637
+-- Last: Verify the worker's placements are still healthy
+SELECT count(*)
+FROM pg_dist_shard_placement AS sp,
+     pg_dist_shard AS s
+WHERE sp.shardid = s.shardid
+AND sp.nodename = 'localhost'
+AND sp.nodeport = :worker_1_port
+AND sp.shardstate = 1
+AND s.logicalrelid = 'limit_orders'::regclass;
+ count
+-------
+     2
+(1 row)
+
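Both verification queries above lean on Citus' numeric placement states: shardstate 1 is a healthy (finalized) placement, while 3 marks a placement inactive after it missed a modification. A compact, illustrative way to summarize the same catalog data per node and state (not part of the expected output):

SELECT sp.nodeport, sp.shardstate, count(*) AS placements
FROM pg_dist_shard_placement sp
JOIN pg_dist_shard s ON s.shardid = sp.shardid
WHERE s.logicalrelid = 'limit_orders'::regclass
GROUP BY sp.nodeport, sp.shardstate
ORDER BY sp.nodeport, sp.shardstate;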
+-- Undo our change...
+-- First: Connect to the first worker node
+\c - - - :worker_1_port
+-- Second: Put the limit_orders shard back in place on the first worker node
+ALTER TABLE renamed_orders RENAME TO limit_orders_750000;
+-- Third: Connect back to master node
+\c - - - :master_port
+-- attempting to change the partition key is unsupported
+UPDATE limit_orders SET id = 0 WHERE id = 246;
+ERROR: modifying the partition value of rows is not allowed
+UPDATE limit_orders SET id = 0 WHERE id = 0 OR id = 246;
+ERROR: modifying the partition value of rows is not allowed
+-- setting the partition column value to itself is allowed
+UPDATE limit_orders SET id = 246 WHERE id = 246;
+UPDATE limit_orders SET id = 246 WHERE id = 246 AND symbol = 'GM';
+UPDATE limit_orders SET id = limit_orders.id WHERE id = 246;
+-- UPDATEs with a FROM clause are unsupported
+UPDATE limit_orders SET limit_price = 0.00 FROM bidders
+                    WHERE limit_orders.id = 246 AND
+                          limit_orders.bidder_id = bidders.id AND
+                          bidders.name = 'Bernie Madoff';
+ERROR: relation bidders is not distributed
+-- should succeed with a CTE
+WITH deleted_orders AS (INSERT INTO limit_orders VALUES (399, 'PDR', 14, '2017-07-02 16:32:15', 'sell', 43))
+UPDATE limit_orders SET symbol = 'GM';
+SELECT symbol, bidder_id FROM limit_orders WHERE id = 246;
+ symbol | bidder_id
+--------+-----------
+ GM     |        30
+(1 row)
+
+-- updates referencing just a var are supported
+UPDATE limit_orders SET bidder_id = id WHERE id = 246;
+-- updates referencing a column are supported
+UPDATE limit_orders SET bidder_id = bidder_id + 1 WHERE id = 246;
+-- IMMUTABLE functions are allowed
+UPDATE limit_orders SET symbol = LOWER(symbol) WHERE id = 246;
+SELECT symbol, bidder_id FROM limit_orders WHERE id = 246;
+ symbol | bidder_id
+--------+-----------
+ gm     |       247
+(1 row)
+
+-- IMMUTABLE functions are allowed -- even in RETURNING
+UPDATE limit_orders SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol;
+ id  | lower | symbol
+-----+-------+--------
+ 246 | gm    | GM
+(1 row)
+
+ALTER TABLE limit_orders ADD COLUMN array_of_values integer[];
+-- updates referencing STABLE functions are allowed
+UPDATE limit_orders SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id = 246;
+-- so are binary operators
+UPDATE limit_orders SET array_of_values = 1 || array_of_values WHERE id = 246;
+CREATE FUNCTION immutable_append(old_values int[], new_value int)
+RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;
+\c - - - :worker_1_port
+CREATE FUNCTION immutable_append(old_values int[], new_value int)
+RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;
+\c - - - :worker_2_port
+CREATE FUNCTION immutable_append(old_values int[], new_value int)
+RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;
+\c - - - :master_port
+-- immutable function calls with vars are also allowed
+UPDATE limit_orders
+SET array_of_values = immutable_append(array_of_values, 2) WHERE id = 246;
+CREATE FUNCTION stable_append(old_values int[], new_value int)
+RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$
+LANGUAGE plpgsql STABLE;
+-- but STABLE function calls with vars are not allowed
+UPDATE limit_orders
+SET array_of_values = stable_append(array_of_values, 3) WHERE id = 246;
+ERROR: STABLE functions used in UPDATE queries cannot be called with column references
+SELECT array_of_values FROM limit_orders WHERE id = 246;
+ array_of_values
+-----------------
+ {1,2}
+(1 row)
+
+-- STRICT functions work as expected
+CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS
+'SELECT COALESCE($1, 2) + COALESCE($2, 3);' LANGUAGE SQL STABLE STRICT;
+UPDATE limit_orders SET bidder_id = temp_strict_func(1, null) WHERE id = 246;
+ERROR: null value in column "bidder_id" violates not-null constraint
+DETAIL: Failing row contains (246, GM, null, 2007-07-02 16:32:15, buy, 999, {1,2}).
+CONTEXT: while executing command on localhost:57637
+SELECT array_of_values FROM limit_orders WHERE id = 246;
+ array_of_values
+-----------------
+ {1,2}
+(1 row)
+
+ALTER TABLE limit_orders DROP array_of_values;
+-- even in RETURNING
+UPDATE limit_orders SET placed_at = placed_at WHERE id = 246 RETURNING NOW();
+ERROR: non-IMMUTABLE functions are not allowed in the RETURNING clause
+-- check that multi-row UPDATE/DELETEs with RETURNING work
+INSERT INTO multiple_hash VALUES ('0', '1');
+INSERT INTO multiple_hash VALUES ('0', '2');
+INSERT INTO multiple_hash VALUES ('0', '3');
+INSERT INTO multiple_hash VALUES ('0', '4');
+INSERT INTO multiple_hash VALUES ('0', '5');
+INSERT INTO multiple_hash VALUES ('0', '6');
+UPDATE multiple_hash SET data = data ||'-1' WHERE category = '0' RETURNING *;
+ category | data
+----------+------
+ 0        | 1-1
+ 0        | 2-1
+ 0        | 3-1
+ 0        | 4-1
+ 0        | 5-1
+ 0        | 6-1
+(6 rows)
+
+DELETE FROM multiple_hash WHERE category = '0' RETURNING *;
+ category | data
+----------+------
+ 0        | 1-1
+ 0        | 2-1
+ 0        | 3-1
+ 0        | 4-1
+ 0        | 5-1
+ 0        | 6-1
+(6 rows)
+
+-- ensure returned row counters are correct
+\set QUIET off
+INSERT INTO multiple_hash VALUES ('1', '1');
+INSERT 0 1
+INSERT INTO multiple_hash VALUES ('1', '2');
+INSERT 0 1
+INSERT INTO multiple_hash VALUES ('1', '3');
+INSERT 0 1
+INSERT INTO multiple_hash VALUES ('2', '1');
+INSERT 0 1
+INSERT INTO multiple_hash VALUES ('2', '2');
+INSERT 0 1
+INSERT INTO multiple_hash VALUES ('2', '3');
+INSERT 0 1
+INSERT INTO multiple_hash VALUES ('2', '3') RETURNING *;
+ category | data
+----------+------
+ 2        | 3
+(1 row)
+
+INSERT 0 1
+-- check that updates return the right number of rows
+-- one row
+UPDATE multiple_hash SET data = data ||'-1' WHERE category = '1' AND data = '1';
+UPDATE 1
+-- three rows
+UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1';
+UPDATE 3
+-- three rows, with RETURNING
+UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1' RETURNING category;
+ category
+----------
+ 1
+ 1
+ 1
+(3 rows)
+
+UPDATE 3
+-- check
+SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data;
+ category |  data
+----------+---------
+ 1        | 1-1-2-2
+ 1        | 2-2-2
+ 1        | 3-2-2
+(3 rows)
+
+-- check that deletes return the right number of rows
+-- one row
+DELETE FROM multiple_hash WHERE category = '2' AND data = '1';
+DELETE 1
+-- three rows
+DELETE FROM multiple_hash WHERE category = '2';
+DELETE 3
+-- three rows, with RETURNING
+DELETE FROM multiple_hash WHERE category = '1' RETURNING category;
+ category
+----------
+ 1
+ 1
+ 1
+(3 rows)
+
+DELETE 3
+-- check
+SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data;
+ category | data
+----------+------
+(0 rows)
+
+SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data;
+ category | data
+----------+------
+(0 rows)
+
+-- verify interaction of default values, SERIAL, and RETURNING
+\set QUIET on
+CREATE TABLE app_analytics_events (id serial, app_id integer, name text);
+SET citus.shard_count TO 4;
+SELECT create_distributed_table('app_analytics_events', 'app_id', 'hash');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+INSERT INTO
app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; + id +---- + 1 +(1 row) + +INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; + id +---- + 2 +(1 row) + +INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; + id | app_id | name +----+--------+------ + 3 | 103 | Mynt +(1 row) + +DROP TABLE app_analytics_events; +-- again with serial in the partition column +CREATE TABLE app_analytics_events (id serial, app_id integer, name text); +SELECT create_distributed_table('app_analytics_events', 'id'); + create_distributed_table +-------------------------- + +(1 row) + +INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; + id +---- + 1 +(1 row) + +INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; + id +---- + 2 +(1 row) + +INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; + id | app_id | name +----+--------+------ + 3 | 103 | Mynt +(1 row) + +-- Test multi-row insert with serial in the partition column +INSERT INTO app_analytics_events (app_id, name) +VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; + id | app_id | name +----+--------+------ + 5 | 105 | Mynt + 4 | 104 | Wayz +(2 rows) + +INSERT INTO app_analytics_events (id, name) +VALUES (DEFAULT, 'Foo'), (300, 'Wah') RETURNING *; + id | app_id | name +-----+--------+------ + 6 | | Foo + 300 | | Wah +(2 rows) + +PREPARE prep(varchar) AS +INSERT INTO app_analytics_events (id, name) +VALUES (DEFAULT, $1 || '.1'), (400 , $1 || '.2') RETURNING *; +EXECUTE prep('version-1'); + id | app_id | name +-----+--------+------------- + 7 | | version-1.1 + 400 | | version-1.2 +(2 rows) + +EXECUTE prep('version-2'); + id | app_id | name +-----+--------+------------- + 8 | | version-2.1 + 400 | | version-2.2 +(2 rows) + +EXECUTE prep('version-3'); + id | app_id | name +-----+--------+------------- + 400 | | version-3.2 + 9 | | version-3.1 +(2 rows) + +EXECUTE prep('version-4'); + id | app_id | name +-----+--------+------------- + 10 | | version-4.1 + 400 | | version-4.2 +(2 rows) + +EXECUTE prep('version-5'); + id | app_id | name +-----+--------+------------- + 400 | | version-5.2 + 11 | | version-5.1 +(2 rows) + +EXECUTE prep('version-6'); + id | app_id | name +-----+--------+------------- + 400 | | version-6.2 + 12 | | version-6.1 +(2 rows) + +SELECT * FROM app_analytics_events ORDER BY id, name; + id | app_id | name +-----+--------+----------------- + 1 | 101 | Fauxkemon Geaux + 2 | 102 | Wayz + 3 | 103 | Mynt + 4 | 104 | Wayz + 5 | 105 | Mynt + 6 | | Foo + 7 | | version-1.1 + 8 | | version-2.1 + 9 | | version-3.1 + 10 | | version-4.1 + 11 | | version-5.1 + 12 | | version-6.1 + 300 | | Wah + 400 | | version-1.2 + 400 | | version-2.2 + 400 | | version-3.2 + 400 | | version-4.2 + 400 | | version-5.2 + 400 | | version-6.2 +(19 rows) + +TRUNCATE app_analytics_events; +-- Test multi-row insert with a dropped column +ALTER TABLE app_analytics_events DROP COLUMN app_id; +INSERT INTO app_analytics_events (name) +VALUES ('Wayz'), ('Mynt') RETURNING *; + id | name +----+------ + 14 | Mynt + 13 | Wayz +(2 rows) + +SELECT * FROM app_analytics_events ORDER BY id; + id | name +----+------ + 13 | Wayz + 14 | Mynt +(2 rows) + +DROP TABLE app_analytics_events; +-- Test multi-row insert with a dropped column before the partition column +CREATE TABLE app_analytics_events (id int default 3, app_id integer, name text); +SELECT create_distributed_table('app_analytics_events', 'name', colocate_with 
=> 'none'); + create_distributed_table +-------------------------- + +(1 row) + +ALTER TABLE app_analytics_events DROP COLUMN app_id; +INSERT INTO app_analytics_events (name) +VALUES ('Wayz'), ('Mynt') RETURNING *; + id | name +----+------ + 3 | Mynt + 3 | Wayz +(2 rows) + +SELECT * FROM app_analytics_events WHERE name = 'Wayz'; + id | name +----+------ + 3 | Wayz +(1 row) + +DROP TABLE app_analytics_events; +-- Test multi-row insert with serial in a reference table +CREATE TABLE app_analytics_events (id serial, app_id integer, name text); +SELECT create_reference_table('app_analytics_events'); + create_reference_table +------------------------ + +(1 row) + +INSERT INTO app_analytics_events (app_id, name) +VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; + id | app_id | name +----+--------+------ + 1 | 104 | Wayz + 2 | 105 | Mynt +(2 rows) + +SELECT * FROM app_analytics_events ORDER BY id; + id | app_id | name +----+--------+------ + 1 | 104 | Wayz + 2 | 105 | Mynt +(2 rows) + +DROP TABLE app_analytics_events; +-- Test multi-row insert with serial in a non-partition column +CREATE TABLE app_analytics_events (id int, app_id serial, name text); +SELECT create_distributed_table('app_analytics_events', 'id'); + create_distributed_table +-------------------------- + +(1 row) + +INSERT INTO app_analytics_events (id, name) +VALUES (99, 'Wayz'), (98, 'Mynt') RETURNING name, app_id; + name | app_id +------+-------- + Mynt | 2 + Wayz | 1 +(2 rows) + +SELECT * FROM app_analytics_events ORDER BY id; + id | app_id | name +----+--------+------ + 98 | 2 | Mynt + 99 | 1 | Wayz +(2 rows) + +DROP TABLE app_analytics_events; +-- test UPDATE with subqueries +CREATE TABLE raw_table (id bigint, value bigint); +CREATE TABLE summary_table ( + id bigint, + min_value numeric, + average_value numeric, + count int, + uniques int); +SELECT create_distributed_table('raw_table', 'id'); + create_distributed_table +-------------------------- + +(1 row) + +SELECT create_distributed_table('summary_table', 'id'); + create_distributed_table +-------------------------- + +(1 row) + +INSERT INTO raw_table VALUES (1, 100); +INSERT INTO raw_table VALUES (1, 200); +INSERT INTO raw_table VALUES (1, 200); +INSERT INTO raw_table VALUES (1, 300); +INSERT INTO raw_table VALUES (2, 400); +INSERT INTO raw_table VALUES (2, 500); +INSERT INTO summary_table VALUES (1); +INSERT INTO summary_table VALUES (2); +SELECT * FROM summary_table ORDER BY id; + id | min_value | average_value | count | uniques +----+-----------+---------------+-------+--------- + 1 | | | | + 2 | | | | +(2 rows) + +UPDATE summary_table SET average_value = average_query.average FROM ( + SELECT avg(value) AS average FROM raw_table WHERE id = 1 + ) average_query +WHERE id = 1; +SELECT * FROM summary_table ORDER BY id; + id | min_value | average_value | count | uniques +----+-----------+----------------------+-------+--------- + 1 | | 200.0000000000000000 | | + 2 | | | | +(2 rows) + +-- try different syntax +UPDATE summary_table SET (min_value, average_value) = + (SELECT min(value), avg(value) FROM raw_table WHERE id = 2) +WHERE id = 2; +SELECT * FROM summary_table ORDER BY id; + id | min_value | average_value | count | uniques +----+-----------+----------------------+-------+--------- + 1 | | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | +(2 rows) + +UPDATE summary_table SET min_value = 100 + WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value > 100) AND id = 1; +SELECT * FROM summary_table ORDER BY id; + id | min_value | average_value | count | 
uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 | 200.0000000000000000 |       |
+  2 |       400 | 450.0000000000000000 |       |
+(2 rows)
+
+-- we don't need an explicit filter on the UPDATE if the subquery already prunes to one shard
+UPDATE summary_table SET uniques = 2
+  WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value IN (100, 200));
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 | 200.0000000000000000 |       |       2
+  2 |       400 | 450.0000000000000000 |       |
+(2 rows)
+
+-- use inner results for a non-partition column
+UPDATE summary_table SET uniques = NULL
+  WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 | 200.0000000000000000 |       |
+  2 |       400 | 450.0000000000000000 |       |
+(2 rows)
+
+-- these should not update anything
+UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table WHERE id = 1 AND id = 4
+    ) average_query
+WHERE id = 1 AND id = 4;
+UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table WHERE id = 1
+    ) average_query
+WHERE id = 1 AND id = 4;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 | 200.0000000000000000 |       |
+  2 |       400 | 450.0000000000000000 |       |
+(2 rows)
+
+-- update with NULL value
+UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table WHERE id = 1 AND id = 4
+    ) average_query
+WHERE id = 1;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 |                      |       |
+  2 |       400 | 450.0000000000000000 |       |
+(2 rows)
+
+-- multi-shard updates with recursively planned subqueries
+BEGIN;
+UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table) average_query;
+ROLLBACK;
+BEGIN;
+UPDATE summary_table SET average_value = average_value + 1 WHERE id =
+                   (SELECT id FROM raw_table WHERE value > 100 LIMIT 1);
+ROLLBACK;
+-- test complex queries
+UPDATE summary_table
+SET
+    uniques = metrics.expensive_uniques,
+    count = metrics.total_count
+FROM
+    (SELECT
+        id,
+        count(DISTINCT (CASE WHEN value > 100 then value end)) AS expensive_uniques,
+        count(value) AS total_count
+    FROM raw_table
+    WHERE id = 1
+    GROUP BY id) metrics
+WHERE
+    summary_table.id = metrics.id AND
+    summary_table.id = 1;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 |                      |     4 |       2
+  2 |       400 | 450.0000000000000000 |       |
+(2 rows)
+
+-- test joins
+UPDATE summary_table SET count = count + 1 FROM raw_table
+    WHERE raw_table.id = summary_table.id AND summary_table.id = 1;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 |                      |     5 |       2
+  2 |       400 | 450.0000000000000000 |       |
+(2 rows)
+
+-- test with prepared statements
+PREPARE prepared_update_with_subquery(int, int) AS
+    UPDATE summary_table SET count = count + $1 FROM raw_table
+    WHERE raw_table.id = summary_table.id AND summary_table.id = $2;
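The six executions that follow are deliberate: PostgreSQL's plan cache considers switching a prepared statement to a generic plan starting with the sixth execution, so the last EXECUTE is the one that reaches Citus with a parameter that is no longer constant-folded into the plan. A minimal, hypothetical illustration of the same pattern (not part of the expected output):

PREPARE count_for_id(int) AS SELECT count(*) FROM summary_table WHERE id = $1;
EXECUTE count_for_id(1); -- executions 1-5 may use custom plans
EXECUTE count_for_id(1);
EXECUTE count_for_id(1);
EXECUTE count_for_id(1);
EXECUTE count_for_id(1);
EXECUTE count_for_id(1); -- sixth execution: generic-plan path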
+-- execute 6 times to trigger prepared statement usage
+EXECUTE prepared_update_with_subquery(10, 1);
+EXECUTE prepared_update_with_subquery(10, 1);
+EXECUTE prepared_update_with_subquery(10, 1);
+EXECUTE prepared_update_with_subquery(10, 1);
+EXECUTE prepared_update_with_subquery(10, 1);
+EXECUTE prepared_update_with_subquery(10, 1);
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 |                      |    65 |       2
+  2 |       400 | 450.0000000000000000 |       |
+(2 rows)
+
+-- test with reference tables
+CREATE TABLE reference_raw_table (id bigint, value bigint);
+CREATE TABLE reference_summary_table (
+    id bigint,
+    min_value numeric,
+    average_value numeric,
+    count int,
+    uniques int);
+SELECT create_reference_table('reference_raw_table');
+ create_reference_table
+------------------------
+
+(1 row)
+
+SELECT create_reference_table('reference_summary_table');
+ create_reference_table
+------------------------
+
+(1 row)
+
+INSERT INTO reference_raw_table VALUES (1, 100);
+INSERT INTO reference_raw_table VALUES (1, 200);
+INSERT INTO reference_raw_table VALUES (1, 200);
+INSERT INTO reference_raw_table VALUES (1,300), (2, 400), (2,500) RETURNING *;
+ id | value
+----+-------
+  1 |   300
+  2 |   400
+  2 |   500
+(3 rows)
+
+INSERT INTO reference_summary_table VALUES (1);
+INSERT INTO reference_summary_table VALUES (2);
+SELECT * FROM reference_summary_table ORDER BY id;
+ id | min_value | average_value | count | uniques
+----+-----------+---------------+-------+---------
+  1 |           |               |       |
+  2 |           |               |       |
+(2 rows)
+
+UPDATE reference_summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM reference_raw_table WHERE id = 1
+    ) average_query
+WHERE id = 1;
+UPDATE reference_summary_table SET (min_value, average_value) =
+    (SELECT min(value), avg(value) FROM reference_raw_table WHERE id = 2)
+WHERE id = 2;
+SELECT * FROM reference_summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |           | 200.0000000000000000 |       |
+  2 |       400 | 450.0000000000000000 |       |
+(2 rows)
+
+-- no need for partition column equality on reference tables
+UPDATE reference_summary_table SET (count) =
+    (SELECT id AS inner_id FROM reference_raw_table WHERE value = 500)
+WHERE min_value = 400;
+SELECT * FROM reference_summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |           | 200.0000000000000000 |       |
+  2 |       400 | 450.0000000000000000 |     2 |
+(2 rows)
+
+-- can read from a reference table and update a distributed table
+UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM reference_raw_table WHERE id = 1
+    ) average_query
+WHERE id = 1;
+-- cannot read from a distributed table and update a reference table
+UPDATE reference_summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table WHERE id = 1
+    ) average_query
+WHERE id = 1;
+ERROR: cannot perform select on a distributed table and modify a reference table
+UPDATE reference_summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table WHERE id = 1 AND id = 2
+    ) average_query
+WHERE id = 1;
+ERROR: cannot perform select on a distributed table and modify a reference table
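The rule behind the two errors above: a reference-table modification runs on every node, and each node would need the result of the distributed SELECT first. A hypothetical psql-level workaround (not part of the expected output) is to materialize the distributed aggregate into a client-side variable and then issue a plain reference-table UPDATE:

SELECT avg(value) AS average FROM raw_table WHERE id = 1 \gset
UPDATE reference_summary_table SET average_value = :average WHERE id = 1;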
+-- test master_modify_multiple_shards() with subqueries and expect to fail
+SELECT master_modify_multiple_shards('
+  UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table WHERE id = 1
+    ) average_query
+  WHERE id = 1');
+ERROR: cannot run multi shard modify query with master_modify_multiple_shards when the query involves subquery or join
+DETAIL: Execute the query without using master_modify_multiple_shards()
+-- test the connection API by using COPY
+-- COPY on the SELECT part
+BEGIN;
+\COPY raw_table FROM STDIN WITH CSV
+INSERT INTO summary_table VALUES (3);
+UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table WHERE id = 3
+    ) average_query
+WHERE id = 3;
+COMMIT;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 | 200.0000000000000000 |    65 |       2
+  2 |       400 | 450.0000000000000000 |       |
+  3 |           | 150.0000000000000000 |       |
+(3 rows)
+
+-- COPY on the UPDATE part
+BEGIN;
+INSERT INTO raw_table VALUES (4, 100);
+INSERT INTO raw_table VALUES (4, 200);
+\COPY summary_table FROM STDIN WITH CSV
+UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table WHERE id = 4
+    ) average_query
+WHERE id = 4;
+COMMIT;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 | 200.0000000000000000 |    65 |       2
+  2 |       400 | 450.0000000000000000 |       |
+  3 |           | 150.0000000000000000 |       |
+  4 |           | 150.0000000000000000 |       |
+(4 rows)
+
+-- COPY on both parts
+BEGIN;
+\COPY raw_table FROM STDIN WITH CSV
+\COPY summary_table FROM STDIN WITH CSV
+UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM raw_table WHERE id = 5
+    ) average_query
+WHERE id = 5;
+COMMIT;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 | 200.0000000000000000 |    65 |       2
+  2 |       400 | 450.0000000000000000 |       |
+  3 |           | 150.0000000000000000 |       |
+  4 |           | 150.0000000000000000 |       |
+  5 |           | 150.0000000000000000 |       |
+(5 rows)
+
+-- COPY on reference tables
+BEGIN;
+\COPY reference_raw_table FROM STDIN WITH CSV
+\COPY summary_table FROM STDIN WITH CSV
+UPDATE summary_table SET average_value = average_query.average FROM (
+    SELECT avg(value) AS average FROM reference_raw_table WHERE id = 6
+    ) average_query
+WHERE id = 6;
+COMMIT;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  1 |       100 | 200.0000000000000000 |    65 |       2
+  2 |       400 | 450.0000000000000000 |       |
+  3 |           | 150.0000000000000000 |       |
+  4 |           | 150.0000000000000000 |       |
+  5 |           | 150.0000000000000000 |       |
+  6 |           | 150.0000000000000000 |       |
+(6 rows)
+
+-- test DELETE queries
+SELECT * FROM raw_table ORDER BY id, value;
+ id | value
+----+-------
+  1 |   100
+  1 |   200
+  1 |   200
+  1 |   300
+  2 |   400
+  2 |   500
+  3 |   100
+  3 |   200
+  4 |   100
+  4 |   200
+  5 |   100
+  5 |   200
+(12 rows)
+
+DELETE FROM summary_table
+    WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  2 |       400 | 450.0000000000000000 |       |
+  3 |           | 150.0000000000000000 |       |
+  4 |           | 150.0000000000000000 |       |
+  5 |           | 150.0000000000000000 |       |
+  6 |           | 150.0000000000000000 |       |
+(5 rows)
+
+-- test with different syntax
+DELETE FROM summary_table USING raw_table
+    WHERE summary_table.id = raw_table.id AND raw_table.id = 2;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  3 |           | 150.0000000000000000 |       |
+  4 |           | 150.0000000000000000 |       |
+  5 |           | 150.0000000000000000 |       |
+  6 |           | 150.0000000000000000 |       |
+(4 rows)
+
+-- cannot read from a distributed table and delete from a reference table
+DELETE FROM reference_summary_table USING raw_table
+    WHERE reference_summary_table.id = raw_table.id AND raw_table.id = 3;
+ERROR: cannot perform select on a distributed table and modify a reference table
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  3 |           | 150.0000000000000000 |       |
+  4 |           | 150.0000000000000000 |       |
+  5 |           | 150.0000000000000000 |       |
+  6 |           | 150.0000000000000000 |       |
+(4 rows)
+
+-- test the connection API by using COPY with DELETEs
+BEGIN;
+\COPY summary_table FROM STDIN WITH CSV
+DELETE FROM summary_table USING raw_table
+    WHERE summary_table.id = raw_table.id AND raw_table.id = 1;
+DELETE FROM summary_table USING reference_raw_table
+    WHERE summary_table.id = reference_raw_table.id AND reference_raw_table.id = 2;
+COMMIT;
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value |    average_value     | count | uniques
+----+-----------+----------------------+-------+---------
+  3 |           | 150.0000000000000000 |       |
+  4 |           | 150.0000000000000000 |       |
+  5 |           | 150.0000000000000000 |       |
+  6 |           | 150.0000000000000000 |       |
+(4 rows)
+
+-- test DELETEs with prepared statements
+PREPARE prepared_delete_with_join(int) AS
+    DELETE FROM summary_table USING raw_table
+    WHERE summary_table.id = raw_table.id AND raw_table.id = $1;
+INSERT INTO raw_table VALUES (6, 100);
+-- execute 6 times to trigger prepared statement usage
+EXECUTE prepared_delete_with_join(1);
+EXECUTE prepared_delete_with_join(2);
+EXECUTE prepared_delete_with_join(3);
+EXECUTE prepared_delete_with_join(4);
+EXECUTE prepared_delete_with_join(5);
+EXECUTE prepared_delete_with_join(6);
+SELECT * FROM summary_table ORDER BY id;
+ id | min_value | average_value | count | uniques
+----+-----------+---------------+-------+---------
+(0 rows)
+
+-- we don't support subqueries in the VALUES clause
+INSERT INTO summary_table (id) VALUES ((SELECT id FROM summary_table));
+ERROR: subqueries are not supported within INSERT queries
+HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
+INSERT INTO summary_table (id) VALUES (5), ((SELECT id FROM summary_table));
+ERROR: subqueries are not supported within INSERT queries
+HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
+-- similar queries with reference tables
+INSERT INTO reference_summary_table (id) VALUES ((SELECT id FROM summary_table));
+ERROR: subqueries are not supported within INSERT queries
+HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
+INSERT INTO summary_table (id) VALUES ((SELECT id FROM reference_summary_table));
+ERROR: subqueries are not supported within INSERT queries
+HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax.
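The HINT names the supported rewrite: lift the subquery out of VALUES so the whole statement plans as INSERT INTO ... SELECT. Illustrative equivalents for the failing statements (not part of the expected output):

-- instead of VALUES ((SELECT id FROM summary_table)):
INSERT INTO summary_table (id) SELECT id FROM summary_table;
-- a mixed constant/subquery VALUES list becomes a UNION ALL:
INSERT INTO summary_table (id) SELECT 5 UNION ALL SELECT id FROM summary_table;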
+DROP TABLE raw_table; +DROP TABLE summary_table; +DROP TABLE reference_raw_table; +DROP TABLE reference_summary_table; diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index 59e8e9ded..7eff736b1 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -466,28 +466,28 @@ FROM pg_dist_partition NATURAL JOIN shard_counts ORDER BY colocationid, logicalrelid; logicalrelid | colocationid | shard_count | partmethod | repmodel --------------------------------------------------------+--------------+-------------+------------+---------- - citus_mx_test_schema_join_1.nation_hash | 2 | 4 | h | s - citus_mx_test_schema_join_1.nation_hash_2 | 2 | 4 | h | s - citus_mx_test_schema_join_2.nation_hash | 2 | 4 | h | s - citus_mx_test_schema.nation_hash_collation_search_path | 2 | 4 | h | s - citus_mx_test_schema.nation_hash_composite_types | 2 | 4 | h | s - mx_ddl_table | 2 | 4 | h | s - app_analytics_events_mx | 2 | 4 | h | s - company_employees_mx | 2 | 4 | h | s - nation_hash | 3 | 16 | h | s - citus_mx_test_schema.nation_hash | 3 | 16 | h | s - lineitem_mx | 4 | 16 | h | s - orders_mx | 4 | 16 | h | s - customer_mx | 5 | 1 | n | t - nation_mx | 5 | 1 | n | t - part_mx | 5 | 1 | n | t - supplier_mx | 5 | 1 | n | t - limit_orders_mx | 6 | 2 | h | s - articles_hash_mx | 6 | 2 | h | s - multiple_hash_mx | 7 | 2 | h | s - researchers_mx | 8 | 2 | h | s - labs_mx | 9 | 1 | h | s - objects_mx | 9 | 1 | h | s - articles_single_shard_hash_mx | 9 | 1 | h | s + citus_mx_test_schema_join_1.nation_hash | 3 | 4 | h | s + citus_mx_test_schema_join_1.nation_hash_2 | 3 | 4 | h | s + citus_mx_test_schema_join_2.nation_hash | 3 | 4 | h | s + citus_mx_test_schema.nation_hash_collation_search_path | 3 | 4 | h | s + citus_mx_test_schema.nation_hash_composite_types | 3 | 4 | h | s + mx_ddl_table | 3 | 4 | h | s + app_analytics_events_mx | 3 | 4 | h | s + company_employees_mx | 3 | 4 | h | s + nation_hash | 4 | 16 | h | s + citus_mx_test_schema.nation_hash | 4 | 16 | h | s + lineitem_mx | 5 | 16 | h | s + orders_mx | 5 | 16 | h | s + customer_mx | 6 | 1 | n | t + nation_mx | 6 | 1 | n | t + part_mx | 6 | 1 | n | t + supplier_mx | 6 | 1 | n | t + limit_orders_mx | 7 | 2 | h | s + articles_hash_mx | 7 | 2 | h | s + multiple_hash_mx | 8 | 2 | h | s + researchers_mx | 9 | 2 | h | s + labs_mx | 10 | 1 | h | s + objects_mx | 10 | 1 | h | s + articles_single_shard_hash_mx | 10 | 1 | h | s (23 rows) diff --git a/src/test/regress/expected/multi_name_lengths.out b/src/test/regress/expected/multi_name_lengths.out index 7cf9d11d7..1e20b564a 100644 --- a/src/test/regress/expected/multi_name_lengths.out +++ b/src/test/regress/expected/multi_name_lengths.out @@ -3,6 +3,7 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000; SET citus.multi_shard_commit_protocol = '2pc'; +SET citus.shard_count TO 2; -- Verify that a table name > 56 characters gets hashed properly. CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, @@ -29,6 +30,8 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345 (2 rows) \c - - - :master_port +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 2; -- Verify that the UDF works and rejects bad arguments. 
SELECT shard_name(NULL, 666666); shard_name @@ -59,15 +62,9 @@ CREATE TABLE name_lengths ( col2 integer not null, constraint constraint_a UNIQUE (col1) ); -SELECT master_create_distributed_table('name_lengths', 'col1', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('name_lengths', '2', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('name_lengths', 'col1', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -165,6 +162,8 @@ Index "public.tmp_idx_123456789012345678901234567890123456789_5e470afa_225003" btree, for table "public.name_lengths_225003" \c - - - :master_port +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 2; -- Verify that distributed tables with too-long names -- for CHECK constraints are no trouble. CREATE TABLE sneaky_name_lengths ( @@ -173,15 +172,9 @@ CREATE TABLE sneaky_name_lengths ( int_col_12345678901234567890123456789012345678901234567890 integer not null, CHECK (int_col_12345678901234567890123456789012345678901234567890 > 100) ); -SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -231,6 +224,8 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_n (1 row) \c - - - :master_port +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 2; DROP TABLE sneaky_name_lengths CASCADE; -- verify that named constraint with too-long name gets hashed properly CREATE TABLE sneaky_name_lengths ( @@ -239,15 +234,9 @@ CREATE TABLE sneaky_name_lengths ( int_col_12345678901234567890123456789012345678901234567890 integer not null, constraint unique_12345678901234567890123456789012345678901234567890 UNIQUE (col1) ); -SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -260,21 +249,17 @@ SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); (1 row) \c - - - :master_port +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 2; DROP TABLE sneaky_name_lengths CASCADE; -- Verify that much larger shardIds are handled properly ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 2250000000000; CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); -SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); + 
create_distributed_table +-------------------------- (1 row) @@ -288,20 +273,16 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345 (2 rows) \c - - - :master_port +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 2; DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE; -- Verify that multi-byte boundaries are respected for databases with UTF8 encoding. CREATE TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D" UESCAPE '!' ( col1 integer not null PRIMARY KEY, col2 integer not null); -SELECT master_create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', '2', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -332,20 +313,16 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0 (2 rows) \c - - - :master_port +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 2; -- Verify that shard_name UDF supports schemas CREATE SCHEMA multi_name_lengths; CREATE TABLE multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); -SELECT master_create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 2, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_partition_pruning.out b/src/test/regress/expected/multi_partition_pruning.out index efa5262cd..4ddecc47a 100644 --- a/src/test/regress/expected/multi_partition_pruning.out +++ b/src/test/regress/expected/multi_partition_pruning.out @@ -57,9 +57,9 @@ CREATE TABLE varchar_partitioned_table ( varchar_column varchar(100) ); -SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -85,9 +85,9 @@ CREATE TABLE array_partitioned_table ( array_column text[] ); -SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append'); + create_distributed_table +-------------------------- 
(1 row) @@ -121,9 +121,9 @@ CREATE TABLE composite_partitioned_table ( composite_column composite_type ); -SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out index 1d86f035a..72f98a03c 100644 --- a/src/test/regress/expected/multi_prepare_plsql.out +++ b/src/test/regress/expected/multi_prepare_plsql.out @@ -280,15 +280,10 @@ CREATE TABLE plpgsql_table ( key int, value int ); -SELECT master_create_distributed_table('plpgsql_table','key','hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('plpgsql_table',4,1); - master_create_worker_shards ------------------------------ +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('plpgsql_table','key','hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out index ec9204301..53d9d49f5 100644 --- a/src/test/regress/expected/multi_prepare_sql.out +++ b/src/test/regress/expected/multi_prepare_sql.out @@ -255,15 +255,10 @@ CREATE TABLE router_executor_table ( comment varchar(20), stats test_composite_type ); -SELECT master_create_distributed_table('router_executor_table', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('router_executor_table', 2, 2); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 2; +SELECT create_distributed_table('router_executor_table', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -344,15 +339,11 @@ CREATE TABLE prepare_table ( key int, value int ); -SELECT master_create_distributed_table('prepare_table','key','hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('prepare_table',4,1); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('prepare_table','key','hash'); + create_distributed_table +-------------------------- (1 row) @@ -1049,15 +1040,11 @@ CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IM \c - - - :master_port -- test table CREATE TABLE test_table (test_id integer NOT NULL, data text); -SELECT master_create_distributed_table('test_table', 'test_id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('test_table', 2, 2); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 2; +SELECT create_distributed_table('test_table', 'test_id', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_prune_shard_list.out b/src/test/regress/expected/multi_prune_shard_list.out index b9baaf36b..f39264127 100644 --- a/src/test/regress/expected/multi_prune_shard_list.out +++ b/src/test/regress/expected/multi_prune_shard_list.out @@ -31,16 +31,10 @@ CREATE 
FUNCTION print_sorted_shard_intervals(regclass) -- =================================================================== -- create distributed table observe shard pruning CREATE TABLE pruning ( species text, last_pruned date, plant_id integer ); -SELECT master_create_distributed_table('pruning', 'species', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - --- create worker shards -SELECT master_create_worker_shards('pruning', 4, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('pruning', 'species', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -134,9 +128,9 @@ SELECT print_sorted_shard_intervals('pruning'); -- create range distributed table observe shard pruning CREATE TABLE pruning_range ( species text, last_pruned date, plant_id integer ); -SELECT master_create_distributed_table('pruning_range', 'species', 'range'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('pruning_range', 'species', 'range'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_repair_shards.out b/src/test/regress/expected/multi_repair_shards.out index 01753b77c..89617f1e3 100644 --- a/src/test/regress/expected/multi_repair_shards.out +++ b/src/test/regress/expected/multi_repair_shards.out @@ -11,16 +11,12 @@ CREATE INDEX ON customer_engagements (id); CREATE INDEX ON customer_engagements (created_at); CREATE INDEX ON customer_engagements (event_data); -- distribute the table -SELECT master_create_distributed_table('customer_engagements', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -- create a single shard on the first worker -SELECT master_create_worker_shards('customer_engagements', 1, 2); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 2; +SELECT create_distributed_table('customer_engagements', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -105,17 +101,13 @@ CREATE FOREIGN TABLE remote_engagements ( event_data text ) SERVER fake_fdw_server; -- distribute the table -SELECT master_create_distributed_table('remote_engagements', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -- create a single shard on the first worker -SELECT master_create_worker_shards('remote_engagements', 1, 2); +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 2; +SELECT create_distributed_table('remote_engagements', 'id', 'hash'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - master_create_worker_shards ------------------------------ + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_repartition_udt.out b/src/test/regress/expected/multi_repartition_udt.out index 88ee2ffdb..2d98f3096 100644 --- a/src/test/regress/expected/multi_repartition_udt.out +++ b/src/test/regress/expected/multi_repartition_udt.out @@ -127,27 +127,18 @@ FUNCTION 1 test_udt_hash(test_udt); -- Connect to master \c - - - :master_port -- Distribute and populate the two tables. 
-SELECT master_create_distributed_table('repartition_udt', 'pk', 'hash'); - master_create_distributed_table ---------------------------------- +SET citus.shard_count TO 3; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('repartition_udt', 'pk', 'hash'); + create_distributed_table +-------------------------- (1 row) -SELECT master_create_worker_shards('repartition_udt', 3, 1); - master_create_worker_shards ------------------------------ - -(1 row) - -SELECT master_create_distributed_table('repartition_udt_other', 'pk', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('repartition_udt_other', 5, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 5; +SELECT create_distributed_table('repartition_udt_other', 'pk', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out index fd5d773b2..e51b4c47f 100644 --- a/src/test/regress/expected/multi_replicate_reference_table.out +++ b/src/test/regress/expected/multi_replicate_reference_table.out @@ -394,7 +394,7 @@ ORDER BY logicalrelid; logicalrelid | partmethod | colocationid | repmodel -----------------------------------------+------------+--------------+---------- replicate_reference_table_reference_one | n | 1370002 | t - replicate_reference_table_hash | h | 1360005 | c + replicate_reference_table_hash | h | 1360004 | c (2 rows) BEGIN; diff --git a/src/test/regress/expected/multi_shard_modify.out b/src/test/regress/expected/multi_shard_modify.out index 2c0a32c5b..ca28ea617 100644 --- a/src/test/regress/expected/multi_shard_modify.out +++ b/src/test/regress/expected/multi_shard_modify.out @@ -7,15 +7,9 @@ CREATE TABLE multi_shard_modify_test ( t_key integer not null, t_name varchar(25) not null, t_value integer not null); -SELECT master_create_distributed_table('multi_shard_modify_test', 't_key', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('multi_shard_modify_test', 4, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('multi_shard_modify_test', 't_key', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -71,15 +65,9 @@ CREATE TABLE temp_nations(name text, key integer); SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' '); ERROR: relation temp_nations is not distributed -- commands with a USING clause are unsupported -SELECT master_create_distributed_table('temp_nations', 'name', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('temp_nations', 4, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('temp_nations', 'name', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_sql_function.out b/src/test/regress/expected/multi_sql_function.out index c1fd96e33..4b06b11aa 100644 --- a/src/test/regress/expected/multi_sql_function.out +++ b/src/test/regress/expected/multi_sql_function.out @@ -81,15 +81,10 @@ CREATE TABLE temp_table ( key int, value int ); -SELECT 
master_create_distributed_table('temp_table','key','hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('temp_table',4,1); - master_create_worker_shards ------------------------------ +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('temp_table','key','hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_subquery.out b/src/test/regress/expected/multi_subquery.out index 9c7d872ba..e797098ec 100644 --- a/src/test/regress/expected/multi_subquery.out +++ b/src/test/regress/expected/multi_subquery.out @@ -794,15 +794,10 @@ CREATE TABLE subquery_pruning_varchar_test_table a varchar, b int ); -SELECT master_create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('subquery_pruning_varchar_test_table', 4, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_subtransactions.out b/src/test/regress/expected/multi_subtransactions.out index 45dedde93..d28d62bce 100644 --- a/src/test/regress/expected/multi_subtransactions.out +++ b/src/test/regress/expected/multi_subtransactions.out @@ -137,15 +137,10 @@ CREATE TABLE researchers ( lab_id int NOT NULL, name text NOT NULL ); -SELECT master_create_distributed_table('researchers', 'lab_id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('researchers', 2, 2); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 2; +SELECT create_distributed_table('researchers', 'lab_id', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_subtransactions_0.out b/src/test/regress/expected/multi_subtransactions_0.out index b21697b05..74765de10 100644 --- a/src/test/regress/expected/multi_subtransactions_0.out +++ b/src/test/regress/expected/multi_subtransactions_0.out @@ -137,15 +137,10 @@ CREATE TABLE researchers ( lab_id int NOT NULL, name text NOT NULL ); -SELECT master_create_distributed_table('researchers', 'lab_id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('researchers', 2, 2); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 2; +SELECT create_distributed_table('researchers', 'lab_id', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out index 87b3ce17f..7fe9af2e9 100644 --- a/src/test/regress/expected/multi_table_ddl.out +++ b/src/test/regress/expected/multi_table_ddl.out @@ -4,9 +4,9 @@ -- Tests around changing the schema and dropping of a distributed table SET citus.next_shard_id TO 870000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); -SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); + 
create_distributed_table +-------------------------- (1 row) @@ -29,9 +29,9 @@ COMMIT; \set VERBOSITY default -- recreate testtableddl CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); -SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -41,9 +41,9 @@ DROP TABLE testtableddl; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); -- create table and do create empty shard test here, too SET citus.shard_replication_factor TO 1; -SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -90,15 +90,11 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- create a table with a SERIAL column CREATE TABLE testserialtable(id serial, group_id integer); -SELECT master_create_distributed_table('testserialtable', 'group_id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('testserialtable', 2, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('testserialtable', 'group_id', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out index dd0d2f4bf..a73267bc6 100644 --- a/src/test/regress/expected/multi_task_assignment_policy.out +++ b/src/test/regress/expected/multi_task_assignment_policy.out @@ -16,9 +16,9 @@ SET citus.explain_distributed_queries TO off; -- and shard placement data into system catalogs. We next run Explain command, -- and check that tasks are assigned to worker nodes as expected. CREATE TABLE task_assignment_test_table (test_id integer); -SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_task_assignment_policy_0.out b/src/test/regress/expected/multi_task_assignment_policy_0.out index fb4ffda24..36b52ed8c 100644 --- a/src/test/regress/expected/multi_task_assignment_policy_0.out +++ b/src/test/regress/expected/multi_task_assignment_policy_0.out @@ -16,9 +16,9 @@ SET citus.explain_distributed_queries TO off; -- and shard placement data into system catalogs. We next run Explain command, -- and check that tasks are assigned to worker nodes as expected. 
CREATE TABLE task_assignment_test_table (test_id integer); -SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_truncate.out b/src/test/regress/expected/multi_truncate.out index 4368b3524..2dba47eaf 100644 --- a/src/test/regress/expected/multi_truncate.out +++ b/src/test/regress/expected/multi_truncate.out @@ -7,9 +7,9 @@ SET citus.next_shard_id TO 1210000; -- expect all shards to be dropped -- CREATE TABLE test_truncate_append(a int); -SELECT master_create_distributed_table('test_truncate_append', 'a', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('test_truncate_append', 'a', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -75,9 +75,9 @@ DROP TABLE test_truncate_append; -- expect shard to be present, data to be truncated -- CREATE TABLE test_truncate_range(a int); -SELECT master_create_distributed_table('test_truncate_range', 'a', 'range'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('test_truncate_range', 'a', 'range'); + create_distributed_table +-------------------------- (1 row) @@ -237,16 +237,11 @@ SELECT count(*) FROM test_truncate_hash; DROP TABLE test_truncate_hash; -- test with table with spaces in it +SET citus.shard_replication_factor TO 1; CREATE TABLE "a b hash" (a int, b int); -SELECT master_create_distributed_table('"a b hash"', 'a', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('"a b hash"', 4, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('"a b hash"', 'a', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -266,9 +261,9 @@ SELECT * from "a b hash"; DROP TABLE "a b hash"; -- now with append CREATE TABLE "a b append" (a int, b int); -SELECT master_create_distributed_table('"a b append"', 'a', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('"a b append"', 'a', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_upgrade_reference_table.out b/src/test/regress/expected/multi_upgrade_reference_table.out index eba553673..6a0ceabdd 100644 --- a/src/test/regress/expected/multi_upgrade_reference_table.out +++ b/src/test/regress/expected/multi_upgrade_reference_table.out @@ -240,7 +240,7 @@ WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- - h | f | 1360001 | c + h | f | 1360000 | c (1 row) SELECT @@ -262,7 +262,7 @@ WHERE colocationid IN WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ - 1360001 | 1 | 1 | 23 + 1360000 | 1 | 1 | 23 (1 row) SELECT @@ -354,7 +354,7 @@ WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- - h | f | 1360002 | c 
+ h | f | 1360001 | c (1 row) SELECT @@ -376,7 +376,7 @@ WHERE colocationid IN WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ - 1360002 | 1 | 2 | 23 + 1360001 | 1 | 2 | 23 (1 row) SELECT @@ -468,7 +468,7 @@ WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- - h | f | 1360003 | c + h | f | 1360002 | c (1 row) SELECT @@ -490,7 +490,7 @@ WHERE colocationid IN WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ - 1360003 | 1 | 2 | 23 + 1360002 | 1 | 2 | 23 (1 row) SELECT @@ -584,7 +584,7 @@ WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- - h | f | 1360004 | c + h | f | 1360003 | c (1 row) SELECT @@ -606,7 +606,7 @@ WHERE colocationid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ - 1360004 | 1 | 1 | 23 + 1360003 | 1 | 1 | 23 (1 row) SELECT @@ -639,7 +639,7 @@ WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- - h | f | 1360004 | c + h | f | 1360003 | c (1 row) SELECT @@ -661,7 +661,7 @@ WHERE colocationid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ - 1360004 | 1 | 1 | 23 + 1360003 | 1 | 1 | 23 (1 row) SELECT @@ -697,7 +697,7 @@ WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- - h | f | 1360004 | c + h | f | 1360003 | c (1 row) SELECT @@ -719,7 +719,7 @@ WHERE colocationid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ - 1360004 | 1 | 1 | 23 + 1360003 | 1 | 1 | 23 (1 row) SELECT @@ -823,7 +823,7 @@ WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- - h | f | 1360005 | s + h | f | 1360004 | s (1 row) SELECT @@ -845,7 +845,7 @@ WHERE colocationid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ - 1360005 | 1 | 1 | 23 + 1360004 | 1 | 1 | 23 (1 row) SELECT @@ -875,7 +875,7 @@ WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- - h | f | 1360005 | s + h | f | 1360004 | s (1 row) SELECT @@ -897,7 +897,7 @@ WHERE colocationid 
IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ - 1360005 | 1 | 1 | 23 + 1360004 | 1 | 1 | 23 (1 row) SELECT @@ -944,7 +944,7 @@ WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- - h | f | 1360006 | c + h | f | 1360005 | c (1 row) SELECT @@ -966,7 +966,7 @@ WHERE colocationid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ - 1360006 | 1 | 2 | 23 + 1360005 | 1 | 2 | 23 (1 row) SELECT diff --git a/src/test/regress/expected/multi_upsert.out b/src/test/regress/expected/multi_upsert.out index ef2808493..0d54719ea 100644 --- a/src/test/regress/expected/multi_upsert.out +++ b/src/test/regress/expected/multi_upsert.out @@ -7,15 +7,9 @@ CREATE TABLE upsert_test third_col int ); -- distribute the table and create shards -SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('upsert_test', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('upsert_test', 'part_key', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -147,15 +141,9 @@ CREATE TABLE upsert_test_2 PRIMARY KEY (part_key, other_col) ); -- distribute the table and create shards -SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('upsert_test_2', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -174,15 +162,9 @@ CREATE TABLE upsert_test_3 -- note that this is not a unique index CREATE INDEX idx_ups_test ON upsert_test_3(part_key); -- distribute the table and create shards -SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('upsert_test_3', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -196,15 +178,9 @@ CREATE TABLE upsert_test_4 count int ); -- distribute the table and create shards -SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('upsert_test_4', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -225,16 +201,11 @@ SELECT * FROM upsert_test_4; (1 row) -- now test dropped columns +SET citus.shard_replication_factor TO 1; CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float); -SELECT master_create_distributed_table('dropcol_distributed', 'key', 
'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('dropcol_distributed', 4, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('dropcol_distributed', 'key', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_utilities_0.out b/src/test/regress/expected/multi_utilities_0.out index 97e3e149e..90ccd92d7 100644 --- a/src/test/regress/expected/multi_utilities_0.out +++ b/src/test/regress/expected/multi_utilities_0.out @@ -10,16 +10,12 @@ SELECT substring(:'server_version', '\d+')::int > 10 as version_above_ten; -- =================================================================== -- test utility statement functionality -- =================================================================== +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 1; CREATE TABLE sharded_table ( name text, id bigint ); -SELECT master_create_distributed_table('sharded_table', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('sharded_table', 2, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('sharded_table', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -167,16 +163,12 @@ ERROR: no locks specified DROP TABLE sharded_table; -- VACUUM tests -- create a table with a single shard (for convenience) +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 2; CREATE TABLE dustbunnies (id integer, name text, age integer); -SELECT master_create_distributed_table('dustbunnies', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('dustbunnies', 1, 2); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('dustbunnies', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/input/multi_agg_distinct.source b/src/test/regress/input/multi_agg_distinct.source index 947b6133e..4a628d2df 100644 --- a/src/test/regress/input/multi_agg_distinct.source +++ b/src/test/regress/input/multi_agg_distinct.source @@ -21,7 +21,7 @@ CREATE TABLE lineitem_range ( l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); -SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range'); +SELECT create_distributed_table('lineitem_range', 'l_orderkey', 'range'); SELECT master_create_empty_shard('lineitem_range') AS new_shard_id \gset @@ -88,8 +88,8 @@ CREATE TABLE lineitem_hash ( l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); -SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); -SELECT master_create_worker_shards('lineitem_hash', 4, 1); +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' diff --git a/src/test/regress/input/multi_agg_type_conversion.source b/src/test/regress/input/multi_agg_type_conversion.source index 80b08cb34..34daa58a0 100644 --- a/src/test/regress/input/multi_agg_type_conversion.source +++ 
b/src/test/regress/input/multi_agg_type_conversion.source @@ -18,7 +18,7 @@ CREATE TABLE aggregate_type ( float_value float(20) not null, double_value float(40) not null, interval_value interval not null); -SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append'); +SELECT create_distributed_table('aggregate_type', 'float_value', 'append'); \copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index 8c417e4fd..281f76691 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -28,7 +28,7 @@ CREATE TABLE lineitem_alter ( l_comment varchar(44) not null ) WITH ( fillfactor = 80 ); -SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); +SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); \copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- verify that the storage options made it to the table definitions @@ -248,8 +248,9 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; -- Create single-shard table (to avoid deadlocks in the upcoming test hackery) CREATE TABLE single_shard_items (id integer NOT NULL, name text); -SELECT master_create_distributed_table('single_shard_items', 'id', 'hash'); -SELECT master_create_worker_shards('single_shard_items', 1, 2); +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 2; +SELECT create_distributed_table('single_shard_items', 'id', 'hash'); -- Verify that ALTER TABLE .. REPLICATION IDENTITY [USING INDEX]* .. works CREATE UNIQUE INDEX replica_idx on single_shard_items(id); @@ -346,8 +347,8 @@ RESET citus.multi_shard_commit_protocol; -- verify that not any of shard placements are marked as failed when a query failure occurs CREATE TABLE test_ab (a int, b int); -SELECT master_create_distributed_table('test_ab', 'a', 'hash'); -SELECT master_create_worker_shards('test_ab', 8, 2); +SET citus.shard_count TO 8; +SELECT create_distributed_table('test_ab', 'a', 'hash'); INSERT INTO test_ab VALUES (2, 10); INSERT INTO test_ab VALUES (2, 11); CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a); @@ -462,6 +463,8 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alte \c - - - :master_port -- verify alter table and drop sequence in the same transaction does not cause deadlock +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 2; CREATE TABLE sequence_deadlock_test (a serial, b serial); SELECT create_distributed_table('sequence_deadlock_test', 'a'); diff --git a/src/test/regress/input/multi_append_table_to_shard.source b/src/test/regress/input/multi_append_table_to_shard.source index 0081bfa1b..77b442782 100644 --- a/src/test/regress/input/multi_append_table_to_shard.source +++ b/src/test/regress/input/multi_append_table_to_shard.source @@ -19,15 +19,16 @@ CREATE TABLE multi_append_table_to_shard_left left_number INTEGER not null, left_text TEXT not null ); -SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); +SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); CREATE TABLE multi_append_table_to_shard_right_reference_hash ( right_number INTEGER not null, right_text TEXT not null ); -SELECT master_create_distributed_table('multi_append_table_to_shard_right_reference_hash', 
'right_number', 'hash'); -SELECT master_create_worker_shards('multi_append_table_to_shard_right_reference_hash', 1, 1); +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('multi_append_table_to_shard_right_reference_hash', 'right_number', 'hash'); -- Replicate 'left' table on both workers SELECT set_config('citus.shard_replication_factor', '2', false); diff --git a/src/test/regress/input/multi_complex_count_distinct.source b/src/test/regress/input/multi_complex_count_distinct.source index 955f7993f..d2c2a8f79 100644 --- a/src/test/regress/input/multi_complex_count_distinct.source +++ b/src/test/regress/input/multi_complex_count_distinct.source @@ -4,7 +4,8 @@ SET citus.next_shard_id TO 240000; - +SET citus.shard_count TO 8; +SET citus.shard_replication_factor TO 1; CREATE TABLE lineitem_hash ( l_orderkey bigint not null, @@ -25,8 +26,7 @@ CREATE TABLE lineitem_hash ( l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); -SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); -SELECT master_create_worker_shards('lineitem_hash', 8, 1); +SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' diff --git a/src/test/regress/input/multi_copy.source b/src/test/regress/input/multi_copy.source index b8fc53f74..fa4fe9325 100644 --- a/src/test/regress/input/multi_copy.source +++ b/src/test/regress/input/multi_copy.source @@ -632,7 +632,7 @@ COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); -- verify each placement is active SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport; -- create a reference table CREATE TABLE numbers_reference(a int, b int); @@ -647,7 +647,7 @@ CREATE TABLE numbers_hash_other(a int, b int); SELECT create_distributed_table('numbers_hash_other', 'a'); SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid, shardid, nodeport; -- manually corrupt pg_dist_shard such that both copies of one shard is placed in -- worker_1. This is to test the behavior when no replica of a shard is accessible. 
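-- Aside: every hunk in this patch applies the same migration. The deprecated
-- two-step API took the shard count and replication factor as function
-- arguments, while create_distributed_table() reads them from GUCs that must
-- be set beforehand. A minimal sketch of the equivalence ('example_table' is
-- a hypothetical name, not a table touched by this patch):
--
--   old:  SELECT master_create_distributed_table('example_table', 'key', 'hash');
--         SELECT master_create_worker_shards('example_table', 4, 2);
--
--   new:  SET citus.shard_count TO 4;
--         SET citus.shard_replication_factor TO 2;
--         SELECT create_distributed_table('example_table', 'key', 'hash');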
@@ -675,7 +675,7 @@ COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); -- verify shards in the first worker as marked invalid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport; -- try to insert into a reference table copy should fail COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); @@ -702,7 +702,7 @@ COPY numbers_hash_other FROM STDIN WITH (FORMAT 'csv'); -- since copy has failed altogether SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid, shardid, nodeport; -- re-enable test_user on the first worker \c - :default_user - :worker_1_port @@ -748,7 +748,7 @@ SELECT count(a) FROM numbers_hash; -- verify shard is still marked as valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport; DROP TABLE numbers_hash; SELECT * FROM run_command_on_workers('DROP USER test_user'); @@ -815,7 +815,7 @@ ABORT; -- copy into a table with a JSONB column CREATE TABLE copy_jsonb (key text, value jsonb, extra jsonb default '["default"]'::jsonb); -SELECT create_distributed_table('copy_jsonb', 'key'); +SELECT create_distributed_table('copy_jsonb', 'key', colocate_with => 'none'); -- JSONB from text should work \COPY copy_jsonb (key, value) FROM STDIN diff --git a/src/test/regress/input/multi_create_schema.source b/src/test/regress/input/multi_create_schema.source index acb34edd4..4580214b5 100644 --- a/src/test/regress/input/multi_create_schema.source +++ b/src/test/regress/input/multi_create_schema.source @@ -8,7 +8,7 @@ CREATE TABLE nation ( n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); -SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append'); +SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append'); \copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 91dc1d9a2..aa4fa3a6c 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -147,7 +147,8 @@ test: multi_outer_join # is independed from the rest of the group, it is added to increase parallelism. 
# --- test: multi_create_fdw -test: multi_complex_count_distinct multi_select_distinct multi_modifications +test: multi_complex_count_distinct multi_select_distinct +test: multi_modifications test: multi_distribution_metadata test: multi_generate_ddl_commands multi_create_shards multi_prune_shard_list multi_repair_shards test: multi_upsert multi_simple_queries multi_create_insert_proxy multi_data_types diff --git a/src/test/regress/output/multi_agg_distinct.source b/src/test/regress/output/multi_agg_distinct.source index 7b06ae8da..0c5a57cf5 100644 --- a/src/test/regress/output/multi_agg_distinct.source +++ b/src/test/regress/output/multi_agg_distinct.source @@ -19,9 +19,9 @@ CREATE TABLE lineitem_range ( l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); -SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('lineitem_range', 'l_orderkey', 'range'); + create_distributed_table +-------------------------- (1 row) @@ -127,15 +127,10 @@ CREATE TABLE lineitem_hash ( l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); -SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('lineitem_hash', 4, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/output/multi_agg_type_conversion.source b/src/test/regress/output/multi_agg_type_conversion.source index 1d8e1f64d..5fc377c69 100644 --- a/src/test/regress/output/multi_agg_type_conversion.source +++ b/src/test/regress/output/multi_agg_type_conversion.source @@ -33,9 +33,9 @@ CREATE TABLE aggregate_type ( float_value float(20) not null, double_value float(40) not null, interval_value interval not null); -SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('aggregate_type', 'float_value', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index 955ccd7df..191abafb7 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -24,9 +24,9 @@ CREATE TABLE lineitem_alter ( l_comment varchar(44) not null ) WITH ( fillfactor = 80 ); -SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -548,15 +548,11 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; -- Create single-shard table (to avoid deadlocks in the upcoming test hackery) CREATE TABLE single_shard_items (id integer NOT NULL, name text); -SELECT master_create_distributed_table('single_shard_items', 'id', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT 
master_create_worker_shards('single_shard_items', 1, 2); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 2; +SELECT create_distributed_table('single_shard_items', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -731,15 +727,10 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; RESET citus.multi_shard_commit_protocol; -- verify that not any of shard placements are marked as failed when a query failure occurs CREATE TABLE test_ab (a int, b int); -SELECT master_create_distributed_table('test_ab', 'a', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('test_ab', 8, 2); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 8; +SELECT create_distributed_table('test_ab', 'a', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -958,6 +949,8 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alte \c - - - :master_port -- verify alter table and drop sequence in the same transaction does not cause deadlock +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 2; CREATE TABLE sequence_deadlock_test (a serial, b serial); SELECT create_distributed_table('sequence_deadlock_test', 'a'); create_distributed_table diff --git a/src/test/regress/output/multi_append_table_to_shard.source b/src/test/regress/output/multi_append_table_to_shard.source index 79f387b30..2c7b7dd66 100644 --- a/src/test/regress/output/multi_append_table_to_shard.source +++ b/src/test/regress/output/multi_append_table_to_shard.source @@ -19,9 +19,9 @@ CREATE TABLE multi_append_table_to_shard_left left_number INTEGER not null, left_text TEXT not null ); -SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -30,15 +30,11 @@ CREATE TABLE multi_append_table_to_shard_right_reference_hash right_number INTEGER not null, right_text TEXT not null ); -SELECT master_create_distributed_table('multi_append_table_to_shard_right_reference_hash', 'right_number', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('multi_append_table_to_shard_right_reference_hash', 1, 1); - master_create_worker_shards ------------------------------ +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('multi_append_table_to_shard_right_reference_hash', 'right_number', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/output/multi_complex_count_distinct.source b/src/test/regress/output/multi_complex_count_distinct.source index 4ddf8ff2a..1eff1b3ad 100644 --- a/src/test/regress/output/multi_complex_count_distinct.source +++ b/src/test/regress/output/multi_complex_count_distinct.source @@ -2,6 +2,8 @@ -- COMPLEX_COUNT_DISTINCT -- SET citus.next_shard_id TO 240000; +SET citus.shard_count TO 8; +SET citus.shard_replication_factor TO 1; CREATE TABLE lineitem_hash ( l_orderkey bigint not null, l_partkey integer not null, @@ -21,15 +23,9 @@ CREATE TABLE lineitem_hash ( l_comment varchar(44) not null, PRIMARY 
KEY(l_orderkey, l_linenumber) ); -SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); - master_create_distributed_table ---------------------------------- - -(1 row) - -SELECT master_create_worker_shards('lineitem_hash', 8, 1); - master_create_worker_shards ------------------------------ +SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index b45e5261a..6ebb8ff9d 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -826,17 +826,17 @@ COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); -- verify each placement is active SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- - 560171 | 1 | localhost | 57637 560171 | 1 | localhost | 57638 - 560172 | 1 | localhost | 57638 + 560171 | 1 | localhost | 57637 560172 | 1 | localhost | 57637 - 560173 | 1 | localhost | 57637 + 560172 | 1 | localhost | 57638 560173 | 1 | localhost | 57638 - 560174 | 1 | localhost | 57638 + 560173 | 1 | localhost | 57637 560174 | 1 | localhost | 57637 + 560174 | 1 | localhost | 57638 (8 rows) -- create a reference table @@ -858,17 +858,17 @@ SELECT create_distributed_table('numbers_hash_other', 'a'); SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid, shardid, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- - 560176 | 1 | localhost | 57638 560176 | 1 | localhost | 57637 - 560177 | 1 | localhost | 57637 + 560176 | 1 | localhost | 57638 560177 | 1 | localhost | 57638 - 560178 | 1 | localhost | 57638 + 560177 | 1 | localhost | 57637 560178 | 1 | localhost | 57637 - 560179 | 1 | localhost | 57637 + 560178 | 1 | localhost | 57638 560179 | 1 | localhost | 57638 + 560179 | 1 | localhost | 57637 (8 rows) -- manually corrupt pg_dist_shard such that both copies of one shard is placed in @@ -897,17 +897,17 @@ CONTEXT: COPY numbers_hash, line 6: "6,6" -- verify shards in the first worker as marked invalid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- - 560171 | 3 | localhost | 57637 560171 | 1 | localhost | 57638 - 560172 | 1 | localhost | 57638 + 560171 | 3 | localhost | 57637 560172 | 3 | localhost | 57637 - 560173 | 3 | localhost | 57637 + 560172 | 1 | localhost | 57638 560173 | 1 | localhost | 57638 - 560174 | 1 | localhost | 57638 + 560173 | 3 | localhost | 57637 560174 | 3 | localhost | 57637 + 560174 | 1 | localhost | 57638 (8 rows) -- try to insert into a reference table copy should fail @@ -941,17 +941,17 @@ CONTEXT: COPY numbers_hash_other, line 1: "1,1" -- since copy has failed altogether SELECT shardid, 
shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid, shardid, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 560176 | 1 | localhost | 57637 560176 | 1 | localhost | 57637 - 560177 | 1 | localhost | 57637 560177 | 1 | localhost | 57638 - 560178 | 1 | localhost | 57638 + 560177 | 1 | localhost | 57637 560178 | 1 | localhost | 57637 - 560179 | 1 | localhost | 57637 + 560178 | 1 | localhost | 57638 560179 | 1 | localhost | 57638 + 560179 | 1 | localhost | 57637 (8 rows) -- re-enable test_user on the first worker @@ -993,17 +993,17 @@ SELECT count(a) FROM numbers_hash; -- verify shard is still marked as valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) - WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; + WHERE logicalrelid = 'numbers_hash'::regclass order by placementid, shardid, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- - 560180 | 1 | localhost | 57637 560180 | 1 | localhost | 57638 - 560181 | 1 | localhost | 57638 + 560180 | 1 | localhost | 57637 560181 | 1 | localhost | 57637 - 560182 | 1 | localhost | 57637 + 560181 | 1 | localhost | 57638 560182 | 1 | localhost | 57638 - 560183 | 1 | localhost | 57638 + 560182 | 1 | localhost | 57637 560183 | 1 | localhost | 57637 + 560183 | 1 | localhost | 57638 (8 rows) DROP TABLE numbers_hash; @@ -1094,7 +1094,7 @@ NOTICE: Copying data from local table... ABORT; -- copy into a table with a JSONB column CREATE TABLE copy_jsonb (key text, value jsonb, extra jsonb default '["default"]'::jsonb); -SELECT create_distributed_table('copy_jsonb', 'key'); +SELECT create_distributed_table('copy_jsonb', 'key', colocate_with => 'none'); create_distributed_table -------------------------- diff --git a/src/test/regress/output/multi_create_schema.source b/src/test/regress/output/multi_create_schema.source index bd16cd559..4fedfe112 100644 --- a/src/test/regress/output/multi_create_schema.source +++ b/src/test/regress/output/multi_create_schema.source @@ -5,9 +5,9 @@ CREATE TABLE nation ( n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); -SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/sql/multi_agg_approximate_distinct.sql b/src/test/regress/sql/multi_agg_approximate_distinct.sql index a6904baf0..22946aaee 100644 --- a/src/test/regress/sql/multi_agg_approximate_distinct.sql +++ b/src/test/regress/sql/multi_agg_approximate_distinct.sql @@ -68,6 +68,8 @@ SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM linei -- Check that approximate count(distinct) works at a table in a schema other than public -- create necessary objects +SET citus.next_shard_id TO 20000000; +SET citus.next_placement_id TO 20000000; CREATE SCHEMA test_count_distinct_schema; CREATE TABLE test_count_distinct_schema.nation_hash( @@ -76,8 +78,7 @@ CREATE TABLE test_count_distinct_schema.nation_hash( n_regionkey integer not null, n_comment varchar(152) ); -SELECT 
master_create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); -SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, 2); +SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); \copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai diff --git a/src/test/regress/sql/multi_cache_invalidation.sql b/src/test/regress/sql/multi_cache_invalidation.sql index 83cd3e84a..72b5ba801 100644 --- a/src/test/regress/sql/multi_cache_invalidation.sql +++ b/src/test/regress/sql/multi_cache_invalidation.sql @@ -1,9 +1,10 @@ SET citus.next_shard_id TO 1601000; +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; CREATE TABLE tab9 (test_id integer NOT NULL, data int); CREATE TABLE tab10 (test_id integer NOT NULL, data int); -SELECT master_create_distributed_table('tab9', 'test_id', 'hash'); +SELECT create_distributed_table('tab9', 'test_id', 'hash'); SELECT master_create_distributed_table('tab10', 'test_id', 'hash'); -SELECT master_create_worker_shards('tab9', 1, 1); TRUNCATE tab9; UPDATE pg_dist_shard SET logicalrelid = 'tab10'::regclass WHERE logicalrelid = 'tab9'::regclass; TRUNCATE tab10; diff --git a/src/test/regress/sql/multi_citus_tools.sql b/src/test/regress/sql/multi_citus_tools.sql index 36f58b2a6..11067f3b7 100644 --- a/src/test/regress/sql/multi_citus_tools.sql +++ b/src/test/regress/sql/multi_citus_tools.sql @@ -208,9 +208,9 @@ SELECT * FROM run_command_on_workers('select 1') ORDER BY 2 ASC; SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') ORDER BY 2 ASC; -- make sure run_on_all_placements respects shardstate +SET citus.shard_count TO 5; CREATE TABLE check_placements (key int); -SELECT master_create_distributed_table('check_placements', 'key', 'hash'); -SELECT master_create_worker_shards('check_placements', 5, 2); +SELECT create_distributed_table('check_placements', 'key', 'hash'); SELECT * FROM run_command_on_placements('check_placements', 'select 1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0 AND nodeport = :worker_1_port; @@ -219,11 +219,11 @@ DROP TABLE check_placements CASCADE; -- make sure run_on_all_colocated_placements correctly detects colocation CREATE TABLE check_colocated (key int); -SELECT master_create_distributed_table('check_colocated', 'key', 'hash'); -SELECT master_create_worker_shards('check_colocated', 5, 2); +SELECT create_distributed_table('check_colocated', 'key', 'hash'); CREATE TABLE second_table (key int); -SELECT master_create_distributed_table('second_table', 'key', 'hash'); -SELECT master_create_worker_shards('second_table', 4, 2); + +SET citus.shard_count TO 4; +SELECT create_distributed_table('second_table', 'key', 'hash'); SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); -- even when the difference is in replication factor, an error is thrown @@ -256,9 +256,10 @@ DROP TABLE check_colocated CASCADE; DROP TABLE second_table CASCADE; -- runs on all shards +SET citus.shard_count TO 5; + CREATE TABLE check_shards (key int); -SELECT master_create_distributed_table('check_shards', 'key', 'hash'); -SELECT master_create_worker_shards('check_shards', 5, 2); +SELECT create_distributed_table('check_shards', 'key', 'hash'); SELECT * FROM run_command_on_shards('check_shards', 'select 1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0; SELECT * 
 FROM run_command_on_shards('check_shards', 'select 1');
diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql
index 0d74fc615..0b720e68c 100644
--- a/src/test/regress/sql/multi_cluster_management.sql
+++ b/src/test/regress/sql/multi_cluster_management.sql
@@ -32,10 +32,12 @@ SELECT master_disable_node('localhost', :worker_2_port);
 SELECT master_get_active_worker_nodes();

 -- add some shard placements to the cluster
+SET citus.shard_count TO 16;
+SET citus.shard_replication_factor TO 1;
+
 SELECT isactive FROM master_activate_node('localhost', :worker_2_port);
 CREATE TABLE cluster_management_test (col_1 text, col_2 int);
-SELECT master_create_distributed_table('cluster_management_test', 'col_1', 'hash');
-SELECT master_create_worker_shards('cluster_management_test', 16, 1);
+SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');

 -- see that there are some active placements in the candidate node
 SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
@@ -152,6 +154,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_1_port);
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);

 -- check that a distributed table can be created after adding a node in a transaction
+SET citus.shard_count TO 4;
 SELECT master_remove_node('localhost', :worker_2_port);

 BEGIN;
diff --git a/src/test/regress/sql/multi_create_insert_proxy.sql b/src/test/regress/sql/multi_create_insert_proxy.sql
index 122857c43..ac8ac1ccf 100644
--- a/src/test/regress/sql/multi_create_insert_proxy.sql
+++ b/src/test/regress/sql/multi_create_insert_proxy.sql
@@ -50,9 +50,10 @@ CREATE TABLE insert_target (
 -- squelch WARNINGs that contain worker_port
 SET client_min_messages TO ERROR;
+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 1;

-SELECT master_create_distributed_table('insert_target', 'id', 'hash');
-SELECT master_create_worker_shards('insert_target', 2, 1);
+SELECT create_distributed_table('insert_target', 'id', 'hash');

 CREATE TEMPORARY SEQUENCE rows_inserted;
 SELECT create_insert_proxy_for_table('insert_target', 'rows_inserted') AS proxy_tablename
diff --git a/src/test/regress/sql/multi_create_shards.sql b/src/test/regress/sql/multi_create_shards.sql
index 4282ebf70..5e6dde8b9 100644
--- a/src/test/regress/sql/multi_create_shards.sql
+++ b/src/test/regress/sql/multi_create_shards.sql
@@ -53,25 +53,25 @@ CREATE TABLE table_to_distribute (
 -- use the table WITH (OIDS) set
 ALTER TABLE table_to_distribute SET WITH OIDS;
-SELECT master_create_distributed_table('table_to_distribute', 'id', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'id', 'hash');

 -- revert WITH (OIDS) from above
 ALTER TABLE table_to_distribute SET WITHOUT OIDS;

 -- use an index instead of table name
-SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
+SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash');

 -- use a bad column name
-SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'bad_column', 'hash');

 -- use unrecognized partition type
-SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized');
+SELECT create_distributed_table('table_to_distribute', 'name', 'unrecognized');

 -- use a partition column of a type lacking any default operator class
-SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash');

 -- use a partition column of type lacking the required support function (hash)
-SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash');

 -- distribute table and inspect side effects
 SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash');
@@ -119,8 +119,9 @@ CREATE FOREIGN TABLE foreign_table_to_distribute
 )
 SERVER fake_fdw_server;

-SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
-SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1);
+SET citus.shard_count TO 16;
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash');

 SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard
 WHERE logicalrelid = 'foreign_table_to_distribute'::regclass
@@ -133,8 +134,8 @@ CREATE TABLE weird_shard_count
 id bigint
 );

-SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash');
-SELECT master_create_worker_shards('weird_shard_count', 7, 1);
+SET citus.shard_count TO 7;
+SELECT create_distributed_table('weird_shard_count', 'id', 'hash');

 -- Citus ensures all shards are roughly the same size
 SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size
diff --git a/src/test/regress/sql/multi_create_table.sql b/src/test/regress/sql/multi_create_table.sql
index 5017dc587..e50cc311e 100644
--- a/src/test/regress/sql/multi_create_table.sql
+++ b/src/test/regress/sql/multi_create_table.sql
@@ -28,7 +28,7 @@ CREATE TABLE lineitem (
 l_shipmode char(10) not null,
 l_comment varchar(44) not null,
 PRIMARY KEY(l_orderkey, l_linenumber) );
-SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');

 CREATE INDEX lineitem_time_index ON lineitem (l_shipdate);
@@ -43,7 +43,7 @@ CREATE TABLE orders (
 o_shippriority integer not null,
 o_comment varchar(79) not null,
 PRIMARY KEY(o_orderkey) );
-SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
+SELECT create_distributed_table('orders', 'o_orderkey', 'append');

 CREATE TABLE orders_reference (
 o_orderkey bigint not null,
@@ -79,7 +79,7 @@ CREATE TABLE customer_append (
 c_acctbal decimal(15,2) not null,
 c_mktsegment char(10) not null,
 c_comment varchar(117) not null);
-SELECT master_create_distributed_table('customer_append', 'c_custkey', 'append');
+SELECT create_distributed_table('customer_append', 'c_custkey', 'append');

 CREATE TABLE nation (
 n_nationkey integer not null,
@@ -111,7 +111,7 @@ CREATE TABLE part_append (
 p_container char(10) not null,
 p_retailprice decimal(15,2) not null,
 p_comment varchar(23) not null);
-SELECT master_create_distributed_table('part_append', 'p_partkey', 'append');
+SELECT create_distributed_table('part_append', 'p_partkey', 'append');

 CREATE TABLE supplier
 (
@@ -137,7 +137,7 @@ CREATE TABLE supplier_single_shard
 s_acctbal decimal(15,2) not null,
 s_comment varchar(101) not null
 );
-SELECT master_create_distributed_table('supplier_single_shard', 's_suppkey', 'append');
+SELECT create_distributed_table('supplier_single_shard', 's_suppkey', 'append');

 CREATE TABLE mx_table_test (col1 int, col2 text);
diff --git a/src/test/regress/sql/multi_create_table_constraints.sql b/src/test/regress/sql/multi_create_table_constraints.sql
index cbb05fe63..4ab5f850c 100644
--- a/src/test/regress/sql/multi_create_table_constraints.sql
+++ b/src/test/regress/sql/multi_create_table_constraints.sql
@@ -11,7 +11,7 @@ CREATE TABLE uniq_cns_append_tables
 partition_col integer UNIQUE,
 other_col integer
 );
-SELECT master_create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append');
+SELECT create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append');

 CREATE TABLE excl_cns_append_tables
 (
@@ -19,7 +19,7 @@ CREATE TABLE excl_cns_append_tables
 other_col integer,
 EXCLUDE (partition_col WITH =)
 );
-SELECT master_create_distributed_table('excl_cns_append_tables', 'partition_col', 'append');
+SELECT create_distributed_table('excl_cns_append_tables', 'partition_col', 'append');

 -- test that Citus cannot distribute unique constraints that do not include
 -- the partition column on hash-partitioned tables.
@@ -29,14 +29,14 @@ CREATE TABLE pk_on_non_part_col
 partition_col integer,
 other_col integer PRIMARY KEY
 );
-SELECT master_create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash');

 CREATE TABLE uq_on_non_part_col
 (
 partition_col integer,
 other_col integer UNIQUE
 );
-SELECT master_create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash');

 CREATE TABLE ex_on_non_part_col
 (
@@ -44,7 +44,7 @@ CREATE TABLE ex_on_non_part_col
 other_col integer,
 EXCLUDE (other_col WITH =)
 );
-SELECT master_create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash');

 -- now show that Citus can distribute unique and EXCLUDE constraints that
 -- include the partition column for hash-partitioned tables.
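Every hunk in this patch applies the same mechanical rewrite: the legacy two-step API, which registered the table and then created its shards in separate calls, is replaced by a single create_distributed_table() call that reads the shard count and replication factor from session GUCs. A minimal sketch of the pattern, using a hypothetical table name rather than one from the suite:

    -- legacy API: shard count and replication factor passed as arguments
    SELECT master_create_distributed_table('my_table', 'key', 'hash');
    SELECT master_create_worker_shards('my_table', 4, 2);

    -- replacement: the GUCs supply the same values, then one call does both steps
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 2;
    SELECT create_distributed_table('my_table', 'key', 'hash');

Because the GUCs are per-session, hunks that reconnect with \c re-issue the SET statements after each reconnect (see the multi_name_lengths.sql changes below).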
@@ -57,14 +57,14 @@ CREATE TABLE pk_on_part_col
 partition_col integer PRIMARY KEY,
 other_col integer
 );
-SELECT master_create_distributed_table('pk_on_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('pk_on_part_col', 'partition_col', 'hash');

 CREATE TABLE uq_part_col
 (
 partition_col integer UNIQUE,
 other_col integer
 );
-SELECT master_create_distributed_table('uq_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('uq_part_col', 'partition_col', 'hash');

 CREATE TABLE uq_two_columns
 (
@@ -72,8 +72,7 @@ CREATE TABLE uq_two_columns
 other_col integer,
 UNIQUE (partition_col, other_col)
 );
-SELECT master_create_distributed_table('uq_two_columns', 'partition_col', 'hash');
-SELECT master_create_worker_shards('uq_two_columns', '4', '2');
+SELECT create_distributed_table('uq_two_columns', 'partition_col', 'hash');
 INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1);
 INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1);
@@ -83,8 +82,7 @@ CREATE TABLE ex_on_part_col
 other_col integer,
 EXCLUDE (partition_col WITH =)
 );
-SELECT master_create_distributed_table('ex_on_part_col', 'partition_col', 'hash');
-SELECT master_create_worker_shards('ex_on_part_col', '4', '2');
+SELECT create_distributed_table('ex_on_part_col', 'partition_col', 'hash');
 INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,2);
@@ -94,8 +92,7 @@ CREATE TABLE ex_on_two_columns
 other_col integer,
 EXCLUDE (partition_col WITH =, other_col WITH =)
 );
-SELECT master_create_distributed_table('ex_on_two_columns', 'partition_col', 'hash');
-SELECT master_create_worker_shards('ex_on_two_columns', '4', '2');
+SELECT create_distributed_table('ex_on_two_columns', 'partition_col', 'hash');
 INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1);
@@ -105,8 +102,7 @@ CREATE TABLE ex_on_two_columns_prt
 other_col integer,
 EXCLUDE (partition_col WITH =, other_col WITH =) WHERE (other_col > 100)
 );
-SELECT master_create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash');
-SELECT master_create_worker_shards('ex_on_two_columns_prt', '4', '2');
+SELECT create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash');
 INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101);
@@ -118,7 +114,7 @@ CREATE TABLE ex_wrong_operator
 other_col tsrange,
 EXCLUDE USING gist (other_col WITH =, partition_col WITH &&)
 );
-SELECT master_create_distributed_table('ex_wrong_operator', 'partition_col', 'hash');
+SELECT create_distributed_table('ex_wrong_operator', 'partition_col', 'hash');

 CREATE TABLE ex_overlaps
 (
@@ -126,8 +122,7 @@ CREATE TABLE ex_overlaps
 other_col tsrange,
 EXCLUDE USING gist (other_col WITH &&, partition_col WITH =)
 );
-SELECT master_create_distributed_table('ex_overlaps', 'partition_col', 'hash');
-SELECT master_create_worker_shards('ex_overlaps', '4', '2');
+SELECT create_distributed_table('ex_overlaps', 'partition_col', 'hash');
 INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]');
 INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]');
@@ -142,14 +137,14 @@ CREATE TABLE pk_on_part_col_named
 partition_col integer CONSTRAINT pk_on_part_col_named_pk PRIMARY KEY,
 other_col integer
 );
-SELECT master_create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash');
+SELECT create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash');

 CREATE TABLE uq_part_col_named
 (
 partition_col integer CONSTRAINT uq_part_col_named_uniq UNIQUE,
 other_col integer
 );
-SELECT master_create_distributed_table('uq_part_col_named', 'partition_col', 'hash');
+SELECT create_distributed_table('uq_part_col_named', 'partition_col', 'hash');

 CREATE TABLE uq_two_columns_named
 (
@@ -157,8 +152,7 @@ CREATE TABLE uq_two_columns_named
 other_col integer,
 CONSTRAINT uq_two_columns_named_uniq UNIQUE (partition_col, other_col)
 );
-SELECT master_create_distributed_table('uq_two_columns_named', 'partition_col', 'hash');
-SELECT master_create_worker_shards('uq_two_columns_named', '4', '2');
+SELECT create_distributed_table('uq_two_columns_named', 'partition_col', 'hash');
 INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1);
 INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1);
@@ -168,8 +162,7 @@ CREATE TABLE ex_on_part_col_named
 other_col integer,
 CONSTRAINT ex_on_part_col_named_exclude EXCLUDE (partition_col WITH =)
 );
-SELECT master_create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash');
-SELECT master_create_worker_shards('ex_on_part_col_named', '4', '2');
+SELECT create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash');
 INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,2);
@@ -179,8 +172,7 @@ CREATE TABLE ex_on_two_columns_named
 other_col integer,
 CONSTRAINT ex_on_two_columns_named_exclude EXCLUDE (partition_col WITH =, other_col WITH =)
 );
-SELECT master_create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash');
-SELECT master_create_worker_shards('ex_on_two_columns_named', '4', '2');
+SELECT create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash');
 INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1);
 INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1);
@@ -192,8 +184,7 @@ CREATE TABLE ex_multiple_excludes
 CONSTRAINT ex_multiple_excludes_excl1 EXCLUDE (partition_col WITH =, other_col WITH =),
 CONSTRAINT ex_multiple_excludes_excl2 EXCLUDE (partition_col WITH =, other_other_col WITH =)
 );
-SELECT master_create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash');
-SELECT master_create_worker_shards('ex_multiple_excludes', '4', '2');
+SELECT create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash');
 INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,1);
 INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,2);
 INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,2,1);
@@ -204,7 +195,7 @@ CREATE TABLE ex_wrong_operator_named
 other_col tsrange,
 CONSTRAINT ex_wrong_operator_named_exclude EXCLUDE USING gist (other_col WITH =, partition_col WITH &&)
 );
-SELECT master_create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash');
+SELECT create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash');

 CREATE TABLE ex_overlaps_named
 (
@@ -212,8 +203,7 @@ CREATE TABLE ex_overlaps_named
 other_col tsrange,
 CONSTRAINT ex_overlaps_operator_named_exclude EXCLUDE USING gist (other_col WITH &&, partition_col WITH =)
 );
-SELECT master_create_distributed_table('ex_overlaps_named', 'partition_col', 'hash');
-SELECT master_create_worker_shards('ex_overlaps_named', '4', '2');
+SELECT create_distributed_table('ex_overlaps_named', 'partition_col', 'hash');
 INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]');
 INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]');
@@ -224,7 +214,7 @@ CREATE TABLE uq_range_tables
 partition_col integer UNIQUE,
 other_col integer
 );
-SELECT master_create_distributed_table('uq_range_tables', 'partition_col', 'range');
+SELECT create_distributed_table('uq_range_tables', 'partition_col', 'range');

 -- show that CHECK constraints are distributed.
 CREATE TABLE check_example
@@ -233,12 +223,10 @@ CREATE TABLE check_example
 other_col integer CHECK (other_col >= 100),
 other_other_col integer CHECK (abs(other_other_col) >= 100)
 );
-SELECT master_create_distributed_table('check_example', 'partition_col', 'hash');
-SELECT master_create_worker_shards('check_example', '2', '2');
-
+SELECT create_distributed_table('check_example', 'partition_col', 'hash');
 \c - - - :worker_1_port
-\d check_example_partition_col_key_365040
-SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365040'::regclass;
+\d check_example_partition_col_key_365056
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365056'::regclass;
 \c - - - :master_port

 -- drop unnecessary tables
diff --git a/src/test/regress/sql/multi_create_table_new_features.sql b/src/test/regress/sql/multi_create_table_new_features.sql
index 11e09e9ca..d527496ad 100644
--- a/src/test/regress/sql/multi_create_table_new_features.sql
+++ b/src/test/regress/sql/multi_create_table_new_features.sql
@@ -13,7 +13,7 @@ CREATE TABLE table_identity_col (
 id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
 payload text
 );
-SELECT master_create_distributed_table('table_identity_col', 'id', 'append');
+SELECT create_distributed_table('table_identity_col', 'id', 'append');

 SELECT create_distributed_table('table_identity_col', 'id');
 SELECT create_distributed_table('table_identity_col', 'text');
diff --git a/src/test/regress/sql/multi_cross_shard.sql b/src/test/regress/sql/multi_cross_shard.sql
index 7f0e51708..3089ccbcb 100644
--- a/src/test/regress/sql/multi_cross_shard.sql
+++ b/src/test/regress/sql/multi_cross_shard.sql
@@ -18,11 +18,11 @@ INSERT INTO multi_task_table VALUES(3, 'elem_3');

 -- Shouldn't log anything when the log level is 'off'
 SHOW citus.multi_task_query_log_level;
-SELECT * FROM multi_task_table;
+SELECT * FROM multi_task_table ORDER BY 1;

 -- Get messages with the log level 'notice'
 SET citus.multi_task_query_log_level TO notice;
-SELECT * FROM multi_task_table;
+SELECT * FROM multi_task_table ORDER BY 1;
 SELECT AVG(id) AS avg_id FROM multi_task_table;

 -- Get messages with the log level 'error'
@@ -65,7 +65,7 @@ INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table WHERE id =
 -- Should have four rows (three rows from the query without where and the one from with where)
 SET citus.multi_task_query_log_level to DEFAULT;
-SELECT * FROM summary_table;
+SELECT * FROM summary_table ORDER BY 1,2;

 -- Set log-level to different levels inside the transaction
 BEGIN;
@@ -80,7 +80,7 @@ ROLLBACK;

 -- Should have only four rows since the transaction was rolled back.
 SET citus.multi_task_query_log_level to DEFAULT;
-SELECT * FROM summary_table;
+SELECT * FROM summary_table ORDER BY 1,2;

 -- Test router-select query
 SET citus.multi_task_query_log_level TO notice;
diff --git a/src/test/regress/sql/multi_data_types.sql b/src/test/regress/sql/multi_data_types.sql
index 512e02031..9e14c9e80 100644
--- a/src/test/regress/sql/multi_data_types.sql
+++ b/src/test/regress/sql/multi_data_types.sql
@@ -65,9 +65,8 @@ CREATE TABLE composite_type_partitioned_table
 col test_composite_type
 );

-SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
-
-SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1);
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash');

 -- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table
 INSERT INTO composite_type_partitioned_table VALUES (1, '(1, 2)'::test_composite_type);
@@ -91,9 +90,7 @@ CREATE TABLE bugs (
 status bug_status
 );

-SELECT master_create_distributed_table('bugs', 'status', 'hash');
-
-SELECT master_create_worker_shards('bugs', 4, 1);
+SELECT create_distributed_table('bugs', 'status', 'hash');

 -- execute INSERT, SELECT and UPDATE queries on bugs
 INSERT INTO bugs VALUES (1, 'new');
@@ -115,8 +112,7 @@ CREATE TABLE varchar_hash_partitioned_table
 name varchar
 );

-SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
-SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1);
+SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');

 -- execute INSERT, SELECT and UPDATE queries on varchar_hash_partitioned_table
 INSERT INTO varchar_hash_partitioned_table VALUES (1, 'Jason');
diff --git a/src/test/regress/sql/multi_deparse_shard_query.sql b/src/test/regress/sql/multi_deparse_shard_query.sql
index 750343931..c9e9eede3 100644
--- a/src/test/regress/sql/multi_deparse_shard_query.sql
+++ b/src/test/regress/sql/multi_deparse_shard_query.sql
@@ -4,6 +4,7 @@
 SET citus.next_shard_id TO 13100000;
+SET citus.shard_replication_factor TO 1;

 CREATE FUNCTION deparse_shard_query_test(text)
 RETURNS VOID
@@ -23,8 +24,7 @@ CREATE TABLE raw_events_1
 event_at date DEfAULT now()
 );
-SELECT master_create_distributed_table('raw_events_1', 'tenant_id', 'hash');
-SELECT master_create_worker_shards('raw_events_1', 4, 1);
+SELECT create_distributed_table('raw_events_1', 'tenant_id', 'hash');

 -- create the second table
 CREATE TABLE raw_events_2
@@ -39,8 +39,7 @@ CREATE TABLE raw_events_2
 event_at date DEfAULT now()
 );
-SELECT master_create_distributed_table('raw_events_2', 'tenant_id', 'hash');
-SELECT master_create_worker_shards('raw_events_2', 4, 1);
+SELECT create_distributed_table('raw_events_2', 'tenant_id', 'hash');

 CREATE TABLE aggregated_events
 (tenant_id bigint,
@@ -52,8 +51,7 @@ CREATE TABLE aggregated_events
 average_value_6 int,
 rollup_hour date);

-SELECT master_create_distributed_table('aggregated_events', 'tenant_id', 'hash');
-SELECT master_create_worker_shards('aggregated_events', 4, 1);
+SELECT create_distributed_table('aggregated_events', 'tenant_id', 'hash');

 -- start with very simple examples on a single table
diff --git a/src/test/regress/sql/multi_distribution_metadata.sql b/src/test/regress/sql/multi_distribution_metadata.sql
index 99b9b7bc9..7100d567b 100644
--- a/src/test/regress/sql/multi_distribution_metadata.sql
+++ b/src/test/regress/sql/multi_distribution_metadata.sql
@@ -60,10 +60,7 @@ CREATE TABLE events_hash (
 id bigint,
 name text
 );
-SELECT master_create_distributed_table('events_hash', 'name', 'hash');
-
--- create worker shards
-SELECT master_create_worker_shards('events_hash', 4, 2);
+SELECT create_distributed_table('events_hash', 'name', 'hash');

 -- set shardstate of one placement from each shard to 0 (invalid value)
 UPDATE pg_dist_placement SET shardstate = 0 WHERE shardid BETWEEN 540000 AND 540003
diff --git a/src/test/regress/sql/multi_drop_extension.sql b/src/test/regress/sql/multi_drop_extension.sql
index dbe73acd8..593f8f864 100644
--- a/src/test/regress/sql/multi_drop_extension.sql
+++ b/src/test/regress/sql/multi_drop_extension.sql
@@ -8,7 +8,7 @@ SET citus.next_shard_id TO 550000;

 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');

 -- this emits a NOTICE message for every table we are dropping with our CASCADE. It would
 -- be nice to check that we get those NOTICE messages, but it's nicer to not have to
@@ -26,7 +26,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);

 -- verify that a table can be created after the extension has been dropped and recreated
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
 SELECT 1 FROM master_create_empty_shard('testtableddl');
 SELECT * FROM testtableddl;
 DROP TABLE testtableddl;
diff --git a/src/test/regress/sql/multi_index_statements.sql b/src/test/regress/sql/multi_index_statements.sql
index 65e1e876c..a656b0198 100644
--- a/src/test/regress/sql/multi_index_statements.sql
+++ b/src/test/regress/sql/multi_index_statements.sql
@@ -5,10 +5,6 @@
 -- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
 -- tables.
-
-SET citus.next_shard_id TO 640000;
-
-
 --
 -- CREATE TEST TABLES
 --
@@ -16,16 +12,17 @@ SET citus.next_shard_id TO 102080;

 CREATE TABLE index_test_range(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_range', 'a', 'range');
+SELECT create_distributed_table('index_test_range', 'a', 'range');
 SELECT master_create_empty_shard('index_test_range');
 SELECT master_create_empty_shard('index_test_range');

+SET citus.shard_count TO 8;
+SET citus.shard_replication_factor TO 2;
 CREATE TABLE index_test_hash(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_hash', 'a', 'hash');
-SELECT master_create_worker_shards('index_test_hash', 8, 2);
+SELECT create_distributed_table('index_test_hash', 'a', 'hash');

 CREATE TABLE index_test_append(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_append', 'a', 'append');
+SELECT create_distributed_table('index_test_append', 'a', 'append');
 SELECT master_create_empty_shard('index_test_append');
 SELECT master_create_empty_shard('index_test_append');
diff --git a/src/test/regress/sql/multi_insert_select.sql b/src/test/regress/sql/multi_insert_select.sql
index 2ac88a69f..a05d79318 100644
--- a/src/test/regress/sql/multi_insert_select.sql
+++ b/src/test/regress/sql/multi_insert_select.sql
@@ -1742,7 +1742,7 @@ TRUNCATE raw_events_first;
 BEGIN;
 INSERT INTO raw_events_first (user_id, value_1)
 SELECT s, s FROM generate_series(1, 5) s;
-SELECT user_id, value_1 FROM raw_events_first;
+SELECT user_id, value_1 FROM raw_events_first ORDER BY 1;
 ROLLBACK;

 -- INSERT ... SELECT and single-shard SELECT in the same transaction is supported
diff --git a/src/test/regress/sql/multi_modifications.sql b/src/test/regress/sql/multi_modifications.sql
index fd7f12f12..99cf36022 100644
--- a/src/test/regress/sql/multi_modifications.sql
+++ b/src/test/regress/sql/multi_modifications.sql
@@ -28,17 +28,17 @@ CREATE TABLE insufficient_shards ( LIKE limit_orders );
 CREATE TABLE range_partitioned ( LIKE limit_orders );
 CREATE TABLE append_partitioned ( LIKE limit_orders );

-SELECT master_create_distributed_table('limit_orders', 'id', 'hash');
-SELECT master_create_distributed_table('multiple_hash', 'category', 'hash');
-SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash');
-SELECT master_create_distributed_table('range_partitioned', 'id', 'range');
-SELECT master_create_distributed_table('append_partitioned', 'id', 'append');
+SET citus.shard_count TO 2;

-SELECT master_create_worker_shards('limit_orders', 2, 2);
-SELECT master_create_worker_shards('multiple_hash', 2, 2);
+SELECT create_distributed_table('limit_orders', 'id', 'hash');
+SELECT create_distributed_table('multiple_hash', 'category', 'hash');
+SELECT create_distributed_table('range_partitioned', 'id', 'range');
+SELECT create_distributed_table('append_partitioned', 'id', 'append');

+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 1;
 -- make a single shard that covers no partition values
-SELECT master_create_worker_shards('insufficient_shards', 1, 1);
+SELECT create_distributed_table('insufficient_shards', 'id', 'hash');
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0
 WHERE logicalrelid = 'insufficient_shards'::regclass;
@@ -309,7 +309,7 @@ UPDATE limit_orders SET limit_price = 0.00 FROM bidders
 limit_orders.bidder_id = bidders.id AND
 bidders.name = 'Bernie Madoff';

--- the connection used for the INSERT is claimed by pull-push, causing the UPDATE to fail
+-- should succeed with a CTE
 WITH deleted_orders AS (INSERT INTO limit_orders VALUES (399, 'PDR', 14, '2017-07-02 16:32:15', 'sell', 43))
 UPDATE limit_orders SET symbol = 'GM';
@@ -420,8 +420,8 @@ SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data;

 -- verify interaction of default values, SERIAL, and RETURNING
 \set QUIET on
 CREATE TABLE app_analytics_events (id serial, app_id integer, name text);
-SELECT master_create_distributed_table('app_analytics_events', 'app_id', 'hash');
-SELECT master_create_worker_shards('app_analytics_events', 4, 1);
+SET citus.shard_count TO 4;
+SELECT create_distributed_table('app_analytics_events', 'app_id', 'hash');

 INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id;
 INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id;
@@ -468,7 +468,7 @@ DROP TABLE app_analytics_events;

 -- Test multi-row insert with a dropped column before the partition column
 CREATE TABLE app_analytics_events (id int default 3, app_id integer, name text);
-SELECT create_distributed_table('app_analytics_events', 'name');
+SELECT create_distributed_table('app_analytics_events', 'name', colocate_with => 'none');
 ALTER TABLE app_analytics_events DROP COLUMN app_id;
diff --git a/src/test/regress/sql/multi_name_lengths.sql b/src/test/regress/sql/multi_name_lengths.sql
index c8d004f92..a68433eab 100644
--- a/src/test/regress/sql/multi_name_lengths.sql
+++ b/src/test/regress/sql/multi_name_lengths.sql
@@ -5,6 +5,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000;

 SET citus.multi_shard_commit_protocol = '2pc';
+SET citus.shard_count TO 2;

 -- Verify that a table name > 56 characters gets hashed properly.
 CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
@@ -17,6 +18,9 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345
 \dt too_long_*
 \c - - - :master_port

+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 2;
+
 -- Verify that the UDF works and rejects bad arguments.
 SELECT shard_name(NULL, 666666);
 SELECT shard_name(0, 666666);
@@ -34,8 +38,7 @@ CREATE TABLE name_lengths (
 constraint constraint_a UNIQUE (col1)
 );
-SELECT master_create_distributed_table('name_lengths', 'col1', 'hash');
-SELECT master_create_worker_shards('name_lengths', '2', '2');
+SELECT create_distributed_table('name_lengths', 'col1', 'hash');

 -- Verify that we CAN add columns with "too-long names", because
 -- the columns' names are not extended in the corresponding shard tables.
@@ -82,6 +85,9 @@ CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890123456789
 \d tmp_idx_*
 \c - - - :master_port

+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 2;
+
 -- Verify that distributed tables with too-long names
 -- for CHECK constraints are no trouble.
 CREATE TABLE sneaky_name_lengths (
@@ -90,8 +96,7 @@ CREATE TABLE sneaky_name_lengths (
 int_col_12345678901234567890123456789012345678901234567890 integer not null,
 CHECK (int_col_12345678901234567890123456789012345678901234567890 > 100)
 );
-SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
-SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
+SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');

 DROP TABLE sneaky_name_lengths CASCADE;

 CREATE TABLE sneaky_name_lengths (
@@ -111,6 +116,9 @@ SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
 SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass;
 \c - - - :master_port

+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 2;
+
 DROP TABLE sneaky_name_lengths CASCADE;

 -- verify that named constraint with too-long name gets hashed properly
@@ -120,13 +128,15 @@ CREATE TABLE sneaky_name_lengths (
 int_col_12345678901234567890123456789012345678901234567890 integer not null,
 constraint unique_12345678901234567890123456789012345678901234567890 UNIQUE (col1)
 );
-SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
-SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
+SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');

 \c - - - :worker_1_port
 \di unique*225008
 \c - - - :master_port

+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 2;
+
 DROP TABLE sneaky_name_lengths CASCADE;

 -- Verify that much larger shardIds are handled properly
@@ -134,21 +144,22 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 2250000000000;
 CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
 col1 integer not null,
 col2 integer not null);
-SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
-SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2');
+SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');

 \c - - - :worker_1_port
 \dt *225000000000*
 \c - - - :master_port

+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 2;
+
 DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE;

 -- Verify that multi-byte boundaries are respected for databases with UTF8 encoding.
 CREATE TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D" UESCAPE '!'
 (
 col1 integer not null PRIMARY KEY,
 col2 integer not null);
-SELECT master_create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash');
-SELECT master_create_worker_shards(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', '2', '2');
+SELECT create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash');

 -- Verify that quoting is used in shard_name
 SELECT shard_name(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass, min(shardid))
@@ -160,13 +171,15 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0
 \di public.elephant_*
 \c - - - :master_port

+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 2;
+
 -- Verify that shard_name UDF supports schemas
 CREATE SCHEMA multi_name_lengths;
 CREATE TABLE multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890 (
 col1 integer not null,
 col2 integer not null);
-SELECT master_create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
-SELECT master_create_worker_shards('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 2, 1);
+SELECT create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');

 SELECT shard_name('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass, min(shardid))
 FROM pg_dist_shard
diff --git a/src/test/regress/sql/multi_partition_pruning.sql b/src/test/regress/sql/multi_partition_pruning.sql
index 80ba98e6a..9b1ea1e62 100644
--- a/src/test/regress/sql/multi_partition_pruning.sql
+++ b/src/test/regress/sql/multi_partition_pruning.sql
@@ -38,7 +38,7 @@ CREATE TABLE varchar_partitioned_table (
 varchar_column varchar(100)
 );
-SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
+SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');

 -- Create logical shards and shard placements with shardid 100,101
@@ -67,7 +67,7 @@ CREATE TABLE array_partitioned_table (
 array_column text[]
 );
-SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append');
+SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append');
 SET client_min_messages TO DEBUG2;

 -- Create logical shard with shardid 102, 103
@@ -105,7 +105,7 @@ CREATE TABLE composite_partitioned_table (
 composite_column composite_type
 );
-SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
+SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
 SET client_min_messages TO DEBUG2;

 -- Create logical shard with shardid 104, 105
diff --git a/src/test/regress/sql/multi_prepare_plsql.sql b/src/test/regress/sql/multi_prepare_plsql.sql
index e047c157c..8bb7ca587 100644
--- a/src/test/regress/sql/multi_prepare_plsql.sql
+++ b/src/test/regress/sql/multi_prepare_plsql.sql
@@ -187,8 +187,8 @@ CREATE TABLE plpgsql_table (
 key int,
 value int
 );
-SELECT master_create_distributed_table('plpgsql_table','key','hash');
-SELECT master_create_worker_shards('plpgsql_table',4,1);
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('plpgsql_table','key','hash');

 CREATE FUNCTION no_parameter_insert() RETURNS void as $$
 BEGIN
diff --git a/src/test/regress/sql/multi_prepare_sql.sql b/src/test/regress/sql/multi_prepare_sql.sql
index 45bc8e2a1..7cf6df783 100644
--- a/src/test/regress/sql/multi_prepare_sql.sql
+++ b/src/test/regress/sql/multi_prepare_sql.sql
@@ -160,9 +160,8 @@ CREATE TABLE router_executor_table (
 comment varchar(20),
 stats test_composite_type
 );
-
-SELECT master_create_distributed_table('router_executor_table', 'id', 'hash');
-SELECT master_create_worker_shards('router_executor_table', 2, 2);
+SET citus.shard_count TO 2;
+SELECT create_distributed_table('router_executor_table', 'id', 'hash');

 -- test parameterized inserts
 PREPARE prepared_insert(varchar(20)) AS
@@ -209,8 +208,9 @@ CREATE TABLE prepare_table (
 key int,
 value int
 );
-SELECT master_create_distributed_table('prepare_table','key','hash');
-SELECT master_create_worker_shards('prepare_table',4,1);
+SET citus.shard_count TO 4;
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('prepare_table','key','hash');

 PREPARE prepared_no_parameter_insert AS
 INSERT INTO prepare_table (key) VALUES (0);
@@ -581,8 +581,9 @@ CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IM

 -- test table
 CREATE TABLE test_table (test_id integer NOT NULL, data text);
-SELECT master_create_distributed_table('test_table', 'test_id', 'hash');
-SELECT master_create_worker_shards('test_table', 2, 2);
+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 2;
+SELECT create_distributed_table('test_table', 'test_id', 'hash');

 -- avoid 9.6+ only context messages
 \set VERBOSITY terse
diff --git a/src/test/regress/sql/multi_prune_shard_list.sql b/src/test/regress/sql/multi_prune_shard_list.sql
index 38f9ee1bd..39f32a502 100644
--- a/src/test/regress/sql/multi_prune_shard_list.sql
+++ b/src/test/regress/sql/multi_prune_shard_list.sql
@@ -42,10 +42,8 @@ CREATE FUNCTION print_sorted_shard_intervals(regclass)

 -- create distributed table and observe shard pruning
 CREATE TABLE pruning ( species text, last_pruned date, plant_id integer );
-SELECT master_create_distributed_table('pruning', 'species', 'hash');
-
--- create worker shards
-SELECT master_create_worker_shards('pruning', 4, 1);
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('pruning', 'species', 'hash');

 -- with no values, expect all shards
 SELECT prune_using_no_values('pruning');
@@ -89,7 +87,7 @@ SELECT print_sorted_shard_intervals('pruning');

 -- create range distributed table and observe shard pruning
 CREATE TABLE pruning_range ( species text, last_pruned date, plant_id integer );
-SELECT master_create_distributed_table('pruning_range', 'species', 'range');
+SELECT create_distributed_table('pruning_range', 'species', 'range');

 -- create worker shards
 SELECT master_create_empty_shard('pruning_range');
diff --git a/src/test/regress/sql/multi_repair_shards.sql b/src/test/regress/sql/multi_repair_shards.sql
index 7e0a37d06..2d087d78f 100644
--- a/src/test/regress/sql/multi_repair_shards.sql
+++ b/src/test/regress/sql/multi_repair_shards.sql
@@ -15,10 +15,10 @@ CREATE INDEX ON customer_engagements (created_at);
 CREATE INDEX ON customer_engagements (event_data);

 -- distribute the table
-SELECT master_create_distributed_table('customer_engagements', 'id', 'hash');
-
 -- create a single shard on the first worker
-SELECT master_create_worker_shards('customer_engagements', 1, 2);
+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 2;
+SELECT create_distributed_table('customer_engagements', 'id', 'hash');

 -- ingest some data for the tests
 INSERT INTO customer_engagements VALUES (1, '01-01-2015', 'first event');
@@ -92,10 +92,10 @@ CREATE FOREIGN TABLE remote_engagements (
 ) SERVER fake_fdw_server;

 -- distribute the table
-SELECT master_create_distributed_table('remote_engagements', 'id', 'hash');
-
 -- create a single shard on the first worker
-SELECT master_create_worker_shards('remote_engagements', 1, 2);
+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 2;
+SELECT create_distributed_table('remote_engagements', 'id', 'hash');

 -- get the new shardid
 SELECT shardid as remotenewshardid FROM pg_dist_shard
 WHERE logicalrelid = 'remote_engagements'::regclass
diff --git a/src/test/regress/sql/multi_repartition_udt.sql b/src/test/regress/sql/multi_repartition_udt.sql
index 9c46a3ac4..b9c3610f2 100644
--- a/src/test/regress/sql/multi_repartition_udt.sql
+++ b/src/test/regress/sql/multi_repartition_udt.sql
@@ -167,11 +167,11 @@ FUNCTION 1 test_udt_hash(test_udt);
 \c - - - :master_port

 -- Distribute and populate the two tables.
-
-SELECT master_create_distributed_table('repartition_udt', 'pk', 'hash');
-SELECT master_create_worker_shards('repartition_udt', 3, 1);
-SELECT master_create_distributed_table('repartition_udt_other', 'pk', 'hash');
-SELECT master_create_worker_shards('repartition_udt_other', 5, 1);
+SET citus.shard_count TO 3;
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('repartition_udt', 'pk', 'hash');
+SET citus.shard_count TO 5;
+SELECT create_distributed_table('repartition_udt_other', 'pk', 'hash');

 INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo');
 INSERT INTO repartition_udt values (2, '(1,2)'::test_udt, 'foo');
diff --git a/src/test/regress/sql/multi_shard_modify.sql b/src/test/regress/sql/multi_shard_modify.sql
index ed9b1e208..2275d0f3d 100644
--- a/src/test/regress/sql/multi_shard_modify.sql
+++ b/src/test/regress/sql/multi_shard_modify.sql
@@ -11,8 +11,7 @@ CREATE TABLE multi_shard_modify_test (
 t_key integer not null,
 t_name varchar(25) not null,
 t_value integer not null);
-SELECT master_create_distributed_table('multi_shard_modify_test', 't_key', 'hash');
-SELECT master_create_worker_shards('multi_shard_modify_test', 4, 2);
+SELECT create_distributed_table('multi_shard_modify_test', 't_key', 'hash');

 COPY multi_shard_modify_test (t_key, t_name, t_value) FROM STDIN WITH (FORMAT 'csv');
 1,san francisco,99
@@ -74,8 +73,7 @@ CREATE TABLE temp_nations(name text, key integer);
 SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' ');

 -- commands with a USING clause are unsupported
-SELECT master_create_distributed_table('temp_nations', 'name', 'hash');
-SELECT master_create_worker_shards('temp_nations', 4, 2);
+SELECT create_distributed_table('temp_nations', 'name', 'hash');
 SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' ');

 -- commands with a RETURNING clause are unsupported
diff --git a/src/test/regress/sql/multi_sql_function.sql b/src/test/regress/sql/multi_sql_function.sql
index e7ad60638..d6dbc79a2 100644
--- a/src/test/regress/sql/multi_sql_function.sql
+++ b/src/test/regress/sql/multi_sql_function.sql
@@ -62,8 +62,8 @@ CREATE TABLE temp_table (
 key int,
 value int
 );
-SELECT master_create_distributed_table('temp_table','key','hash');
-SELECT master_create_worker_shards('temp_table',4,1);
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('temp_table','key','hash');

 CREATE FUNCTION no_parameter_insert_sql() RETURNS void AS $$
 INSERT INTO temp_table (key) VALUES (0);
diff --git a/src/test/regress/sql/multi_subquery.sql b/src/test/regress/sql/multi_subquery.sql
index ea7aeb390..f3ea74197 100644
--- a/src/test/regress/sql/multi_subquery.sql
+++ b/src/test/regress/sql/multi_subquery.sql
@@ -557,9 +557,8 @@ CREATE TABLE subquery_pruning_varchar_test_table
 a varchar,
 b int
 );
-
-SELECT master_create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash');
-SELECT master_create_worker_shards('subquery_pruning_varchar_test_table', 4, 1);
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash');

 -- temporarily disable router executor to test pruning behaviour of subquery pushdown
 SET citus.enable_router_execution TO off;
diff --git a/src/test/regress/sql/multi_subtransactions.sql b/src/test/regress/sql/multi_subtransactions.sql
index 54f553192..5865aa467 100644
--- a/src/test/regress/sql/multi_subtransactions.sql
+++ b/src/test/regress/sql/multi_subtransactions.sql
@@ -114,9 +114,8 @@ CREATE TABLE researchers (
 lab_id int NOT NULL,
 name text NOT NULL
 );
-
-SELECT master_create_distributed_table('researchers', 'lab_id', 'hash');
-SELECT master_create_worker_shards('researchers', 2, 2);
+SET citus.shard_count TO 2;
+SELECT create_distributed_table('researchers', 'lab_id', 'hash');

 -- Basic rollback and release
 BEGIN;
diff --git a/src/test/regress/sql/multi_table_ddl.sql b/src/test/regress/sql/multi_table_ddl.sql
index 93d2064ac..5e4fbf548 100644
--- a/src/test/regress/sql/multi_table_ddl.sql
+++ b/src/test/regress/sql/multi_table_ddl.sql
@@ -8,7 +8,7 @@ SET citus.next_shard_id TO 870000;

 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');

 -- verify that the citus extension can't be dropped while distributed tables exist
 DROP EXTENSION citus;
@@ -28,7 +28,7 @@ COMMIT;

 -- recreate testtableddl
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');

 -- verify that the table can be dropped
 DROP TABLE testtableddl;
@@ -38,7 +38,7 @@ CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);

 -- create table and do create empty shard test here, too
 SET citus.shard_replication_factor TO 1;
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
 SELECT 1 FROM master_create_empty_shard('testtableddl');

 -- now actually drop table and shards
@@ -61,8 +61,10 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);

 -- create a table with a SERIAL column
 CREATE TABLE testserialtable(id serial, group_id integer);
-SELECT master_create_distributed_table('testserialtable', 'group_id', 'hash');
-SELECT master_create_worker_shards('testserialtable', 2, 1);
+
+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('testserialtable', 'group_id', 'hash');

 -- should not be able to add additional serial columns
 ALTER TABLE testserialtable ADD COLUMN other_id serial;
diff --git a/src/test/regress/sql/multi_task_assignment_policy.sql b/src/test/regress/sql/multi_task_assignment_policy.sql
index 1df059539..341a4bb76 100644
--- a/src/test/regress/sql/multi_task_assignment_policy.sql
+++ b/src/test/regress/sql/multi_task_assignment_policy.sql
@@ -19,7 +19,7 @@ SET citus.explain_distributed_queries TO off;
 -- and check that tasks are assigned to worker nodes as expected.

 CREATE TABLE task_assignment_test_table (test_id integer);
-SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append');
+SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append');

 -- Create logical shards with shardids 200, 201, and 202
diff --git a/src/test/regress/sql/multi_truncate.sql b/src/test/regress/sql/multi_truncate.sql
index b085823bf..88c2d044c 100644
--- a/src/test/regress/sql/multi_truncate.sql
+++ b/src/test/regress/sql/multi_truncate.sql
@@ -10,7 +10,7 @@ SET citus.next_shard_id TO 1210000;
 -- expect all shards to be dropped
 --
 CREATE TABLE test_truncate_append(a int);
-SELECT master_create_distributed_table('test_truncate_append', 'a', 'append');
+SELECT create_distributed_table('test_truncate_append', 'a', 'append');

 -- verify no error is thrown when no shards are present
 TRUNCATE TABLE test_truncate_append;
@@ -50,7 +50,7 @@ DROP TABLE test_truncate_append;
 -- expect shard to be present, data to be truncated
 --
 CREATE TABLE test_truncate_range(a int);
-SELECT master_create_distributed_table('test_truncate_range', 'a', 'range');
+SELECT create_distributed_table('test_truncate_range', 'a', 'range');

 -- verify no error is thrown when no shards are present
 TRUNCATE TABLE test_truncate_range;
@@ -145,9 +145,9 @@ SELECT count(*) FROM test_truncate_hash;
 DROP TABLE test_truncate_hash;

 -- test with a table whose name contains spaces
+SET citus.shard_replication_factor TO 1;
 CREATE TABLE "a b hash" (a int, b int);
-SELECT master_create_distributed_table('"a b hash"', 'a', 'hash');
-SELECT master_create_worker_shards('"a b hash"', 4, 1);
+SELECT create_distributed_table('"a b hash"', 'a', 'hash');
 INSERT INTO "a b hash" values (1, 0);
 SELECT * from "a b hash";
 TRUNCATE TABLE "a b hash";
@@ -157,7 +157,7 @@ DROP TABLE "a b hash";

 -- now with append
 CREATE TABLE "a b append" (a int, b int);
-SELECT master_create_distributed_table('"a b append"', 'a', 'append');
+SELECT create_distributed_table('"a b append"', 'a', 'append');
 SELECT master_create_empty_shard('"a b append"') AS new_shard_id \gset
 UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500
 WHERE shardid = :new_shard_id;
diff --git a/src/test/regress/sql/multi_upsert.sql b/src/test/regress/sql/multi_upsert.sql
index fd2c99274..20af1e0cd 100644
--- a/src/test/regress/sql/multi_upsert.sql
+++ b/src/test/regress/sql/multi_upsert.sql
@@ -12,8 +12,7 @@ CREATE TABLE upsert_test
 );

 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test', '4', '2');
+SELECT create_distributed_table('upsert_test', 'part_key', 'hash');

 -- do a regular insert
 INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1), (2, 2);
@@ -116,8 +115,7 @@ CREATE TABLE upsert_test_2
 );

 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test_2', '4', '2');
+SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash');

 -- now show that Citus works with multiple columns as the PRIMARY KEY, including the partition key
 INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1);
@@ -137,8 +135,7 @@ CREATE TABLE upsert_test_3
 CREATE INDEX idx_ups_test ON upsert_test_3(part_key);

 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test_3', '4', '2');
+SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash');

 -- since there are no unique indexes, error-out
 INSERT INTO upsert_test_3 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_3.count + 1;
@@ -151,8 +148,7 @@ CREATE TABLE upsert_test_4
 );

 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test_4', '4', '2');
+SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash');

 -- a single row insert
 INSERT INTO upsert_test_4 VALUES (1, 0);
@@ -169,9 +165,9 @@ INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET coun
 SELECT * FROM upsert_test_4;

 -- now test dropped columns
+SET citus.shard_replication_factor TO 1;
 CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float);
-SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash');
-SELECT master_create_worker_shards('dropcol_distributed', 4, 1);
+SELECT create_distributed_table('dropcol_distributed', 'key', 'hash');
 INSERT INTO dropcol_distributed AS dropcol (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key) DO
 UPDATE SET keep1 = dropcol.keep1;
diff --git a/src/test/regress/sql/multi_utilities.sql b/src/test/regress/sql/multi_utilities.sql
index b70d7a0af..f852fef9e 100644
--- a/src/test/regress/sql/multi_utilities.sql
+++ b/src/test/regress/sql/multi_utilities.sql
@@ -7,10 +7,11 @@ SELECT substring(:'server_version', '\d+')::int > 10 as version_above_ten;
 -- ===================================================================
 -- test utility statement functionality
 -- ===================================================================
+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 1;
 CREATE TABLE sharded_table ( name text, id bigint );
-SELECT master_create_distributed_table('sharded_table', 'id', 'hash');
-SELECT master_create_worker_shards('sharded_table', 2, 1);
+SELECT create_distributed_table('sharded_table', 'id', 'hash');

 -- COPY out is supported with distributed tables
 COPY sharded_table TO STDOUT;
@@ -97,9 +98,11 @@ DROP TABLE sharded_table;
 -- VACUUM tests

 -- create a table with a single shard (for convenience)
+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 2;
+
 CREATE TABLE dustbunnies (id integer, name text, age integer);
-SELECT master_create_distributed_table('dustbunnies', 'id', 'hash');
-SELECT master_create_worker_shards('dustbunnies', 1, 2);
+SELECT create_distributed_table('dustbunnies', 'id', 'hash');

 -- add some data to the distributed table
 \copy dustbunnies (id, name) from stdin with csv
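One pattern worth calling out in the append-distribution hunks above: create_distributed_table(..., 'append') creates no shards up front (several of these tests explicitly verify that no shards are present after the call), so the explicit master_create_empty_shard() calls are kept. A minimal sketch, again with a hypothetical table name:

    CREATE TABLE events_append (id bigint, payload text);
    SELECT create_distributed_table('events_append', 'id', 'append');
    -- append-distributed tables start with zero shards; add them explicitly
    SELECT 1 FROM master_create_empty_shard('events_append');

This is why files such as multi_table_ddl.sql and multi_truncate.sql only swap the distribution call and leave their shard-creation statements untouched.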