Merge pull request #3546 from citusdata/connection-string-tests-9.2

Regression Tests on an Existing Cluster
Halil Ozan Akgül 2020-04-10 16:12:14 +03:00 committed by GitHub
commit 475c98a62a
50 changed files with 2811 additions and 2160 deletions


@@ -115,6 +115,14 @@ check-multi: all
 	$(pg_regress_multi_check) --load-extension=citus \
 	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS)
 
+check-multi-hyperscale: all
+	$(pg_regress_multi_check) --conninfo="$(conninfo)" --load-extension=citus \
+	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule_hyperscale $(EXTRA_TESTS)
+
+check-multi-hyperscale-superuser: all
+	$(pg_regress_multi_check) --conninfo="$(conninfo)" --worker-1-public-hostname="$(worker_1_public_hostname)" --worker-2-public-hostname="$(worker_2_public_hostname)" --load-extension=citus \
+	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule_hyperscale_superuser $(EXTRA_TESTS)
+
 check-multi-vg: all
 	$(pg_regress_multi_check) --load-extension=citus --valgrind \
 	--pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind --valgrind-log-file=$(VALGRIND_LOG_FILE) \
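
The new targets take the target cluster's connection details from make variables instead of spinning up a local cluster. A minimal sketch of how one might invoke them; every hostname and credential below is an illustrative placeholder, not a value from this change:

# conninfo is an arbitrary libpq connection string for the existing cluster.
make check-multi-hyperscale \
    conninfo="host=coordinator.example.com port=5432 user=citus dbname=postgres sslmode=require"

# The superuser variant also needs the workers' public hostnames, which the
# tests use to \c to the workers directly.
make check-multi-hyperscale-superuser \
    conninfo="host=coordinator.example.com port=5432 user=citus dbname=postgres" \
    worker_1_public_hostname="worker-1.example.com" \
    worker_2_public_hostname="worker-2.example.com"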


@@ -2,7 +2,8 @@
 # Only run a few basic tests to set up a testing environment
 # ----------
 test: multi_cluster_management
-test: multi_test_helpers multi_create_fdw
+test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw
 test: multi_test_catalog_views
 test: multi_create_table multi_behavioral_analytics_create_table
-test: multi_load_data
+test: multi_create_table_superuser multi_behavioral_analytics_create_table_superuser
+test: multi_load_data multi_load_data_superuser
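
These schedule files are read by pg_regress (invoked through the pg_regress_multi_check wrapper above) via its --schedule flag: each test: line names a group whose tests run in parallel, and groups execute top to bottom, which is why the new superuser helper tests are slotted in before the steps that depend on them. A bare-bones illustration, with a placeholder path:

# Each `test:` line is one parallel group; groups run in file order.
pg_regress --schedule=src/test/regress/multi_schedule_hyperscale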


@@ -1,5 +1,5 @@
 # The basic tests run analyze, which depends on shard numbers
-test: multi_test_helpers
+test: multi_test_helpers multi_test_helpers_superuser
 test: multi_test_catalog_views
 test: upgrade_basic_before
 test: upgrade_type_before upgrade_ref2ref_before upgrade_distributed_function_before upgrade_rebalance_strategy_before


@@ -30,8 +30,15 @@ fi
 if test -z "${VANILLATEST:-}"
 then
 	touch "$file1" # when adding a new test the expected file does not exist
-	sed -Ef "$BASEDIR/normalize.sed" < "$file1" > "$file1.modified"
-	sed -Ef "$BASEDIR/normalize.sed" < "$file2" > "$file2.modified"
+	normalize_file="$BASEDIR/normalize.sed"
+	# when running tests on an existing cluster, some changes need to be made to
+	# the normalize.sed rules, so a modified copy is used when present
+	if [[ -f "$BASEDIR/normalize_modified.sed" ]]
+	then
+		normalize_file="$BASEDIR/normalize_modified.sed"
+	fi
+	sed -Ef "$normalize_file" < "$file1" > "$file1.modified"
+	sed -Ef "$normalize_file" < "$file2" > "$file2.modified"
 	"$DIFF" -w $args "$file1.modified" "$file2.modified" | LC_CTYPE=C.UTF-8 diff-filter "$BASEDIR/normalize.sed"
 	exit ${PIPESTATUS[0]}
 else
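
Note that nothing in this diff creates normalize_modified.sed itself; the runner merely prefers it over normalize.sed when it exists. One way a harness targeting an existing cluster might generate it, sketched with entirely hypothetical rules:

# Hypothetical sketch: start from the stock rules, then append
# cluster-specific ones (e.g. masking real hostnames in \c output).
cp "$BASEDIR/normalize.sed" "$BASEDIR/normalize_modified.sed"
echo 's/worker-[0-9][0-9]*\.example\.com/localhost/g' >> "$BASEDIR/normalize_modified.sed"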


@@ -9,19 +9,19 @@ SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$);
-- create a type on a worker that should not cause data loss once overwritten with a type
-- from the coordinator
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SET citus.enable_ddl_propagation TO off;
SET search_path TO type_conflict;
CREATE TYPE my_precious_type AS (secret text, should bool);
CREATE TABLE local_table (a int, b my_precious_type);
INSERT INTO local_table VALUES (42, ('always bring a towel', true)::my_precious_type);
\c - - - :master_port
\c - - :master_host :master_port
SET search_path TO type_conflict;
-- overwrite the type on the worker from the coordinator. The type should be overwritten
-- but the data should not have been destroyed
CREATE TYPE my_precious_type AS (scatterd_secret text);
-- verify the data is retained
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SET search_path TO type_conflict;
-- show fields for table
SELECT pg_class.relname,
@@ -45,7 +45,7 @@ SELECT * FROM local_table;
42 | ("always bring a towel",t)
(1 row)
\c - - - :master_port
\c - - :master_host :master_port
SET search_path TO type_conflict;
-- make sure worker_create_or_replace correctly generates new names while types are existing
SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int);');
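
The recurring change in these expected files is the reconnect pattern: psql's \c meta-command takes dbname, user, host, and port, with - meaning "keep the current value", so \c - - :public_worker_1_host :worker_1_port switches host and port while keeping the database and user. The host variables are supplied by the test harness; a hand-run equivalent (file name and values are placeholders) would be:

# psql -v defines variables that the script can reference as :name.
psql -v public_worker_1_host="worker-1.example.com" \
     -v worker_1_port=57637 \
     -f a_test_file.sql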


@@ -462,13 +462,13 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regc
---------------------------------------------------------------------
(0 rows)
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;
Constraint | Definition
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
\c - - :master_host :master_port
-- Tests to check the effect of rollback
BEGIN;
-- Add constraints (which will be rollbacked)
@@ -482,13 +482,13 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regc
---------------------------------------------------------------------
(0 rows)
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;
Constraint | Definition
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
\c - - :master_host :master_port
DROP TABLE products;
SET citus.shard_count to 2;
-- Test if the ALTER TABLE %s ADD %s PRIMARY KEY %s works


@@ -1,12 +1,11 @@
--
-- MULTI_CREATE_TABLE
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100000;
-- Create new table definitions for use in testing distributed planning and
-- execution functionality. Also create indexes to boost performance. Since we
-- need to cover both reference joins and partitioned joins, we have created
-- reference and append-distributed versions of the orders, customer and part tables.
SET citus.next_shard_id TO 360000;
CREATE TABLE lineitem (
l_orderkey bigint not null,
l_partkey integer not null,
@@ -180,228 +179,7 @@ SELECT create_distributed_table('supplier_single_shard', 's_suppkey', 'append');
(1 row)
CREATE TABLE mx_table_test (col1 int, col2 text);
-- Since we're superuser, we can set the replication model to 'streaming' to
-- create a one-off MX table... but if we forget to set the replication factor to one,
-- we should see an error reminding us to fix that
SET citus.replication_model TO 'streaming';
SELECT create_distributed_table('mx_table_test', 'col1');
ERROR: replication factors above one are incompatible with the streaming replication model
HINT: Try again after reducing "citus.shard_replication_factor" to one or setting "citus.replication_model" to "statement".
-- ok, so now actually create the one-off MX table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('mx_table_test', 'col1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass;
repmodel
---------------------------------------------------------------------
s
(1 row)
DROP TABLE mx_table_test;
-- Show that master_create_distributed_table ignores the citus.replication_model GUC
CREATE TABLE s_table(a int);
SELECT master_create_distributed_table('s_table', 'a', 'hash');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
-- Show that master_create_worker_shards complains when RF>1 and replication model is streaming
UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid='s_table'::regclass;
SELECT master_create_worker_shards('s_table', 4, 2);
ERROR: using replication factor 2 with the streaming replication model is not supported
DETAIL: The table s_table is marked as streaming replicated and the shard replication factor of streaming replicated tables must be 1.
HINT: Use replication factor 1.
DROP TABLE s_table;
RESET citus.replication_model;
-- Show that create_distributed_table with append and range distributions ignores
-- the citus.replication_model GUC
SET citus.shard_replication_factor TO 2;
SET citus.replication_model TO streaming;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
-- Show that master_create_distributed_table creates statement-replicated tables no matter
-- what citus.replication_model is set to
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
-- Check that the replication_model overwrite behavior is the same with RF=1
SET citus.shard_replication_factor TO 1;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
RESET citus.replication_model;
SET citus.next_shard_id TO 360009;
-- Test initial data loading
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
@@ -527,583 +305,3 @@ WHERE col1 = 132;
DROP TABLE data_load_test1, data_load_test2;
END;
-- There should be no table on the worker node
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%';
relname
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
-- creating an index after loading data works
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE INDEX data_load_test_idx ON data_load_test (col2);
DROP TABLE data_load_test;
END;
-- popping in and out of existence in the same transaction works
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE data_load_test;
END;
-- but dropping after a write on the distributed table is currently disallowed
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO data_load_test VALUES (243, 'world');
DROP TABLE data_load_test;
END;
-- Test data loading after dropping a column
CREATE TABLE data_load_test (col1 int, col2 text, col3 text, "CoL4"")" int);
INSERT INTO data_load_test VALUES (132, 'hello', 'world');
INSERT INTO data_load_test VALUES (243, 'world', 'hello');
ALTER TABLE data_load_test DROP COLUMN col1;
SELECT create_distributed_table('data_load_test', 'col3');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM data_load_test ORDER BY col2;
col2 | col3 | CoL4")
---------------------------------------------------------------------
hello | world |
world | hello |
(2 rows)
-- make sure the tuple went to the right shard
SELECT * FROM data_load_test WHERE col3 = 'world';
col2 | col3 | CoL4")
---------------------------------------------------------------------
hello | world |
(1 row)
DROP TABLE data_load_test;
SET citus.shard_replication_factor TO default;
SET citus.shard_count to 4;
CREATE TABLE lineitem_hash_part (like lineitem);
SELECT create_distributed_table('lineitem_hash_part', 'l_orderkey');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE orders_hash_part (like orders);
SELECT create_distributed_table('orders_hash_part', 'o_orderkey');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE UNLOGGED TABLE unlogged_table
(
key text,
value text
);
SELECT create_distributed_table('unlogged_table', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM master_get_table_ddl_events('unlogged_table');
master_get_table_ddl_events
---------------------------------------------------------------------
CREATE UNLOGGED TABLE public.unlogged_table (key text, value text)
ALTER TABLE public.unlogged_table OWNER TO postgres
(2 rows)
\c - - - :worker_1_port
SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%';
relpersistence
---------------------------------------------------------------------
u
u
u
u
(4 rows)
\c - - - :master_port
-- Test rollback of create table
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
ROLLBACK;
-- Table should not exist on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
-- Insert 3 rows to make sure that copy after shard creation touches the same
-- worker node twice.
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
INSERT INTO rollback_table VALUES(1, 'Name_1');
INSERT INTO rollback_table VALUES(2, 'Name_2');
INSERT INTO rollback_table VALUES(3, 'Name_3');
SELECT create_distributed_table('rollback_table','id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
ROLLBACK;
-- Table should not exist on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\copy rollback_table from stdin delimiter ','
CREATE INDEX rollback_index ON rollback_table(id);
COMMIT;
-- Check the table is created
SELECT count(*) FROM rollback_table;
count
---------------------------------------------------------------------
3
(1 row)
DROP TABLE rollback_table;
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\copy rollback_table from stdin delimiter ','
ROLLBACK;
-- Table should not exist on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE tt2(id int);
SELECT create_distributed_table('tt2','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO tt1 VALUES(1);
INSERT INTO tt2 SELECT * FROM tt1 WHERE id = 1;
COMMIT;
-- Table should exist on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360069'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
id | integer |
(1 row)
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360073'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
id | integer |
(1 row)
\c - - - :master_port
DROP TABLE tt1;
DROP TABLE tt2;
-- It is known that creating a table with master_create_empty_shard is not
-- transactional, so the table remains on the worker node after the rollback
BEGIN;
CREATE TABLE append_tt1(id int);
SELECT create_distributed_table('append_tt1','id','append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_empty_shard('append_tt1');
master_create_empty_shard
---------------------------------------------------------------------
360077
(1 row)
ROLLBACK;
-- Table exists on the worker node.
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360077'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
id | integer |
(1 row)
\c - - - :master_port
-- There should be no table on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
-- Queries executed by the router executor are allowed in the same transaction
-- as create_distributed_table
BEGIN;
CREATE TABLE tt1(id int);
INSERT INTO tt1 VALUES(1);
SELECT create_distributed_table('tt1','id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO tt1 VALUES(2);
SELECT * FROM tt1 WHERE id = 1;
id
---------------------------------------------------------------------
1
(1 row)
COMMIT;
-- Placements should be created on the worker
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360078'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
id | integer |
(1 row)
\c - - - :master_port
DROP TABLE tt1;
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE tt1;
COMMIT;
-- There should be no table on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
-- Tests with create_distributed_table & DDL & DML commands
-- Test should pass since GetPlacementListConnection can provide connections
-- in this order of execution
CREATE TABLE sample_table(id int);
SELECT create_distributed_table('sample_table','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
BEGIN;
CREATE TABLE stage_table (LIKE sample_table);
\COPY stage_table FROM stdin; -- Note that this operation is a local copy
SELECT create_distributed_table('stage_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO sample_table SELECT * FROM stage_table;
DROP TABLE stage_table;
SELECT * FROM sample_table WHERE id = 3;
id
---------------------------------------------------------------------
3
(1 row)
COMMIT;
-- Show that rows of sample_table are updated
SELECT count(*) FROM sample_table;
count
---------------------------------------------------------------------
4
(1 row)
DROP table sample_table;
-- Test as create_distributed_table - copy - create_distributed_table - copy
-- This combination is used by tests written by some ORMs.
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\COPY tt1 from stdin;
CREATE TABLE tt2(like tt1);
SELECT create_distributed_table('tt2','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\COPY tt2 from stdin;
INSERT INTO tt1 SELECT * FROM tt2;
SELECT * FROM tt1 WHERE id = 3;
id
---------------------------------------------------------------------
3
(1 row)
SELECT * FROM tt2 WHERE id = 6;
id
---------------------------------------------------------------------
6
(1 row)
END;
SELECT count(*) FROM tt1;
count
---------------------------------------------------------------------
6
(1 row)
-- the goal of the following test is to make sure that
-- both create_reference_table and create_distributed_table
-- calls create the schemas without leading to any deadlocks
-- first create reference table, then hash distributed table
BEGIN;
CREATE SCHEMA sc;
CREATE TABLE sc.ref(a int);
insert into sc.ref SELECT s FROM generate_series(0, 100) s;
SELECT create_reference_table('sc.ref');
NOTICE: Copying data from local table...
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE sc.hash(a int);
insert into sc.hash SELECT s FROM generate_series(0, 100) s;
SELECT create_distributed_table('sc.hash', 'a');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
-- first create hash distributed table, then reference table
BEGIN;
CREATE SCHEMA sc2;
CREATE TABLE sc2.hash(a int);
insert into sc2.hash SELECT s FROM generate_series(0, 100) s;
SELECT create_distributed_table('sc2.hash', 'a');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE sc2.ref(a int);
insert into sc2.ref SELECT s FROM generate_series(0, 100) s;
SELECT create_reference_table('sc2.ref');
NOTICE: Copying data from local table...
create_reference_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SET citus.shard_count TO 4;
BEGIN;
CREATE SCHEMA sc3;
CREATE TABLE sc3.alter_replica_table
(
name text,
id int PRIMARY KEY
);
ALTER TABLE sc3.alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
SELECT create_distributed_table('sc3.alter_replica_table', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc3' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,i)
(localhost,57638,t,i)
(2 rows)
BEGIN;
CREATE SCHEMA sc4;
CREATE TABLE sc4.alter_replica_table
(
name text,
id int PRIMARY KEY
);
INSERT INTO sc4.alter_replica_table(id) SELECT generate_series(1,100);
SET search_path = 'sc4';
ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
SELECT create_distributed_table('alter_replica_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc4' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,i)
(localhost,57638,t,i)
(2 rows)
SET search_path = 'public';
BEGIN;
CREATE SCHEMA sc5;
CREATE TABLE sc5.alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO sc5.alter_replica_table(id) SELECT generate_series(1,100);
ALTER TABLE sc5.alter_replica_table REPLICA IDENTITY FULL;
SELECT create_distributed_table('sc5.alter_replica_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc5' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,f)
(localhost,57638,t,f)
(2 rows)
BEGIN;
CREATE SCHEMA sc6;
CREATE TABLE sc6.alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO sc6.alter_replica_table(id) SELECT generate_series(1,100);
CREATE UNIQUE INDEX unique_idx ON sc6.alter_replica_table(id);
ALTER TABLE sc6.alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
SELECT create_distributed_table('sc6.alter_replica_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc6' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,i)
(localhost,57638,t,i)
(2 rows)
BEGIN;
CREATE TABLE alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO alter_replica_table(id) SELECT generate_series(1,100);
CREATE UNIQUE INDEX unique_idx ON alter_replica_table(id);
ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
SELECT create_distributed_table('alter_replica_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='public' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,i)
(localhost,57638,t,i)
(2 rows)
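
The fourth field of each run_command_on_workers row above is pg_class.relreplident, which PostgreSQL encodes as d (default), n (nothing), f (full), or i (index); the checks assert that shards inherit the coordinator table's replica identity. An equivalent manual spot check against one worker, with placeholder connection values:

# 'i' = REPLICA IDENTITY USING INDEX, 'f' = FULL, per pg_class.relreplident.
psql "host=worker-1.example.com port=57637 dbname=postgres" \
  -c "SELECT relname, relreplident FROM pg_class WHERE relname LIKE 'alter_replica_table_%';"
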
DROP TABLE tt1;
DROP TABLE tt2;
DROP TABLE alter_replica_table;
DROP SCHEMA sc CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table sc.ref
drop cascades to table sc.hash
DROP SCHEMA sc2 CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table sc2.hash
drop cascades to table sc2.ref
DROP SCHEMA sc3 CASCADE;
NOTICE: drop cascades to table sc3.alter_replica_table
DROP SCHEMA sc4 CASCADE;
NOTICE: drop cascades to table sc4.alter_replica_table
DROP SCHEMA sc5 CASCADE;
NOTICE: drop cascades to table sc5.alter_replica_table
DROP SCHEMA sc6 CASCADE;
NOTICE: drop cascades to table sc6.alter_replica_table


@@ -334,7 +334,7 @@ SELECT create_distributed_table('check_example', 'partition_col', 'hash');
(1 row)
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'check_example_partition_col_key_365056'::regclass;
Column | Type | Definition
@@ -349,7 +349,7 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_ex
check_example_other_other_col_check | CHECK (abs(other_other_col) >= 100)
(2 rows)
\c - - - :master_port
\c - - :master_host :master_port
-- Index-based constraints are created with shard-extended names, but others
-- (e.g. expression-based table CHECK constraints) do _not_ have shardids in
-- their object names, _at least originally as designed_. At some point, we


@@ -0,0 +1,805 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360005;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100000;
-- Since we're superuser, we can set the replication model to 'streaming' to
-- create a one-off MX table... but if we forget to set the replication factor to one,
-- we should see an error reminding us to fix that
SET citus.replication_model TO 'streaming';
SELECT create_distributed_table('mx_table_test', 'col1');
ERROR: replication factors above one are incompatible with the streaming replication model
HINT: Try again after reducing "citus.shard_replication_factor" to one or setting "citus.replication_model" to "statement".
-- ok, so now actually create the one-off MX table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('mx_table_test', 'col1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass;
repmodel
---------------------------------------------------------------------
s
(1 row)
DROP TABLE mx_table_test;
-- Show that master_create_distributed_table ignores the citus.replication_model GUC
CREATE TABLE s_table(a int);
SELECT master_create_distributed_table('s_table', 'a', 'hash');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
-- Show that master_create_worker_shards complains when RF>1 and replication model is streaming
UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid='s_table'::regclass;
SELECT master_create_worker_shards('s_table', 4, 2);
ERROR: using replication factor 2 with the streaming replication model is not supported
DETAIL: The table s_table is marked as streaming replicated and the shard replication factor of streaming replicated tables must be 1.
HINT: Use replication factor 1.
DROP TABLE s_table;
RESET citus.replication_model;
-- Show that create_distributed_table with append and range distributions ignores
-- the citus.replication_model GUC
SET citus.shard_replication_factor TO 2;
SET citus.replication_model TO streaming;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
-- Show that master_create_distributed_table creates statement-replicated tables no matter
-- what citus.replication_model is set to
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
-- Check that the replication_model overwrite behavior is the same with RF=1
SET citus.shard_replication_factor TO 1;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
RESET citus.replication_model;
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360025;
-- There should be no table on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%';
relname
---------------------------------------------------------------------
(0 rows)
\c - - :master_host :master_port
-- creating an index after loading data works
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE INDEX data_load_test_idx ON data_load_test (col2);
DROP TABLE data_load_test;
END;
-- popping in and out of existence in the same transaction works
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE data_load_test;
END;
-- but dropping after a write on the distributed table is currently disallowed
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO data_load_test VALUES (243, 'world');
DROP TABLE data_load_test;
END;
-- Test data loading after dropping a column
CREATE TABLE data_load_test (col1 int, col2 text, col3 text, "CoL4"")" int);
INSERT INTO data_load_test VALUES (132, 'hello', 'world');
INSERT INTO data_load_test VALUES (243, 'world', 'hello');
ALTER TABLE data_load_test DROP COLUMN col1;
SELECT create_distributed_table('data_load_test', 'col3');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM data_load_test ORDER BY col2;
col2 | col3 | CoL4")
---------------------------------------------------------------------
hello | world |
world | hello |
(2 rows)
-- make sure the tuple went to the right shard
SELECT * FROM data_load_test WHERE col3 = 'world';
col2 | col3 | CoL4")
---------------------------------------------------------------------
hello | world |
(1 row)
DROP TABLE data_load_test;
SET citus.shard_replication_factor TO default;
SET citus.shard_count to 4;
CREATE TABLE lineitem_hash_part (like lineitem);
SELECT create_distributed_table('lineitem_hash_part', 'l_orderkey');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE orders_hash_part (like orders);
SELECT create_distributed_table('orders_hash_part', 'o_orderkey');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE UNLOGGED TABLE unlogged_table
(
key text,
value text
);
SELECT create_distributed_table('unlogged_table', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM master_get_table_ddl_events('unlogged_table');
master_get_table_ddl_events
---------------------------------------------------------------------
CREATE UNLOGGED TABLE public.unlogged_table (key text, value text)
ALTER TABLE public.unlogged_table OWNER TO postgres
(2 rows)
\c - - :public_worker_1_host :worker_1_port
SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%';
relpersistence
---------------------------------------------------------------------
u
u
u
u
(4 rows)
\c - - :master_host :master_port
-- Test rollback of create table
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
ROLLBACK;
-- Table should not exist on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - :master_host :master_port
-- Insert 3 rows to make sure that copy after shard creation touches the same
-- worker node twice.
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
INSERT INTO rollback_table VALUES(1, 'Name_1');
INSERT INTO rollback_table VALUES(2, 'Name_2');
INSERT INTO rollback_table VALUES(3, 'Name_3');
SELECT create_distributed_table('rollback_table','id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
ROLLBACK;
-- Table should not exist on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - :master_host :master_port
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\copy rollback_table from stdin delimiter ','
CREATE INDEX rollback_index ON rollback_table(id);
COMMIT;
-- Check the table is created
SELECT count(*) FROM rollback_table;
count
---------------------------------------------------------------------
3
(1 row)
DROP TABLE rollback_table;
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\copy rollback_table from stdin delimiter ','
ROLLBACK;
-- Table should not exist on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - :master_host :master_port
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE tt2(id int);
SELECT create_distributed_table('tt2','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO tt1 VALUES(1);
INSERT INTO tt2 SELECT * FROM tt1 WHERE id = 1;
COMMIT;
-- Table should exist on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360069'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
id | integer |
(1 row)
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360073'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
id | integer |
(1 row)
\c - - :master_host :master_port
DROP TABLE tt1;
DROP TABLE tt2;
-- It is known that creating a table with master_create_empty_shard is not
-- transactional, so the table remains on the worker node after the rollback
BEGIN;
CREATE TABLE append_tt1(id int);
SELECT create_distributed_table('append_tt1','id','append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_empty_shard('append_tt1');
master_create_empty_shard
---------------------------------------------------------------------
360077
(1 row)
ROLLBACK;
-- Table exists on the worker node.
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360077'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
id | integer |
(1 row)
\c - - :master_host :master_port
-- There should be no table on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - :master_host :master_port
-- Queries executed by the router executor are allowed in the same transaction
-- as create_distributed_table
BEGIN;
CREATE TABLE tt1(id int);
INSERT INTO tt1 VALUES(1);
SELECT create_distributed_table('tt1','id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO tt1 VALUES(2);
SELECT * FROM tt1 WHERE id = 1;
id
---------------------------------------------------------------------
1
(1 row)
COMMIT;
-- Placements should be created on the worker
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360078'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
id | integer |
(1 row)
\c - - :master_host :master_port
DROP TABLE tt1;
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE tt1;
COMMIT;
-- There should be no table on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%');
Column | Type | Modifiers
---------------------------------------------------------------------
(0 rows)
\c - - :master_host :master_port
-- Tests with create_distributed_table & DDL & DML commands
-- Test should pass since GetPlacementListConnection can provide connections
-- in this order of execution
CREATE TABLE sample_table(id int);
SELECT create_distributed_table('sample_table','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
BEGIN;
CREATE TABLE stage_table (LIKE sample_table);
\COPY stage_table FROM stdin; -- Note that this operation is a local copy
SELECT create_distributed_table('stage_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO sample_table SELECT * FROM stage_table;
DROP TABLE stage_table;
SELECT * FROM sample_table WHERE id = 3;
id
---------------------------------------------------------------------
3
(1 row)
COMMIT;
-- Show that rows of sample_table are updated
SELECT count(*) FROM sample_table;
count
---------------------------------------------------------------------
4
(1 row)
DROP table sample_table;
-- Test as create_distributed_table - copy - create_distributed_table - copy
-- This combination is used by tests written by some ORMs.
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\COPY tt1 from stdin;
CREATE TABLE tt2(like tt1);
SELECT create_distributed_table('tt2','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\COPY tt2 from stdin;
INSERT INTO tt1 SELECT * FROM tt2;
SELECT * FROM tt1 WHERE id = 3;
id
---------------------------------------------------------------------
3
(1 row)
SELECT * FROM tt2 WHERE id = 6;
id
---------------------------------------------------------------------
6
(1 row)
END;
SELECT count(*) FROM tt1;
count
---------------------------------------------------------------------
6
(1 row)
-- the goal of the following test is to make sure that
-- both create_reference_table and create_distributed_table
-- calls create the schemas without leading to any deadlocks
-- first create reference table, then hash distributed table
BEGIN;
CREATE SCHEMA sc;
CREATE TABLE sc.ref(a int);
insert into sc.ref SELECT s FROM generate_series(0, 100) s;
SELECT create_reference_table('sc.ref');
NOTICE: Copying data from local table...
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE sc.hash(a int);
insert into sc.hash SELECT s FROM generate_series(0, 100) s;
SELECT create_distributed_table('sc.hash', 'a');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
-- first create hash distributed table, then reference table
BEGIN;
CREATE SCHEMA sc2;
CREATE TABLE sc2.hash(a int);
insert into sc2.hash SELECT s FROM generate_series(0, 100) s;
SELECT create_distributed_table('sc2.hash', 'a');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE sc2.ref(a int);
insert into sc2.ref SELECT s FROM generate_series(0, 100) s;
SELECT create_reference_table('sc2.ref');
NOTICE: Copying data from local table...
create_reference_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SET citus.shard_count TO 4;
BEGIN;
CREATE SCHEMA sc3;
CREATE TABLE sc3.alter_replica_table
(
name text,
id int PRIMARY KEY
);
ALTER TABLE sc3.alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
SELECT create_distributed_table('sc3.alter_replica_table', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc3' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,i)
(localhost,57638,t,i)
(2 rows)
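-- For reference, relreplident in pg_class is a one-letter code: 'd' (default,
-- i.e. the primary key), 'n' (nothing), 'f' (full) and 'i' (index), so the 'i'
-- above confirms the index-based replica identity reached the shard placements.
-- A minimal standalone check could look like this (the shard name pattern is
-- illustrative only):
SELECT relname, relreplident FROM pg_class WHERE relname LIKE 'alter_replica_table_%';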
BEGIN;
CREATE SCHEMA sc4;
CREATE TABLE sc4.alter_replica_table
(
name text,
id int PRIMARY KEY
);
INSERT INTO sc4.alter_replica_table(id) SELECT generate_series(1,100);
SET search_path = 'sc4';
ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
SELECT create_distributed_table('alter_replica_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc4' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,i)
(localhost,57638,t,i)
(2 rows)
SET search_path = 'public';
BEGIN;
CREATE SCHEMA sc5;
CREATE TABLE sc5.alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO sc5.alter_replica_table(id) SELECT generate_series(1,100);
ALTER TABLE sc5.alter_replica_table REPLICA IDENTITY FULL;
SELECT create_distributed_table('sc5.alter_replica_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc5' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,f)
(localhost,57638,t,f)
(2 rows)
BEGIN;
CREATE SCHEMA sc6;
CREATE TABLE sc6.alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO sc6.alter_replica_table(id) SELECT generate_series(1,100);
CREATE UNIQUE INDEX unique_idx ON sc6.alter_replica_table(id);
ALTER TABLE sc6.alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
SELECT create_distributed_table('sc6.alter_replica_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc6' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,i)
(localhost,57638,t,i)
(2 rows)
BEGIN;
CREATE TABLE alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO alter_replica_table(id) SELECT generate_series(1,100);
CREATE UNIQUE INDEX unique_idx ON alter_replica_table(id);
ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
SELECT create_distributed_table('alter_replica_table', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------
(1 row)
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='public' LIMIT 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,i)
(localhost,57638,t,i)
(2 rows)
DROP TABLE tt1;
DROP TABLE tt2;
DROP TABLE alter_replica_table;
DROP SCHEMA sc CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table sc.ref
drop cascades to table sc.hash
DROP SCHEMA sc2 CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table sc2.hash
drop cascades to table sc2.ref
DROP SCHEMA sc3 CASCADE;
NOTICE: drop cascades to table sc3.alter_replica_table
DROP SCHEMA sc4 CASCADE;
NOTICE: drop cascades to table sc4.alter_replica_table
DROP SCHEMA sc5 CASCADE;
NOTICE: drop cascades to table sc5.alter_replica_table
DROP SCHEMA sc6 CASCADE;
NOTICE: drop cascades to table sc6.alter_replica_table

View File

@ -89,7 +89,7 @@ BEGIN;
52 | 52 | Wed Dec 31 16:00:00 2014 PST | t
(1 row)
\c - - - :master_port
\c - - :master_host :master_port
SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id();
initiator_node_identifier | transaction_number | transaction_stamp | ?column?
---------------------------------------------------------------------

View File

@ -145,7 +145,7 @@ NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
ALTER FOREIGN TABLE foreign_table rename to renamed_foreign_table;
ALTER FOREIGN TABLE renamed_foreign_table rename full_name to rename_name;
ALTER FOREIGN TABLE renamed_foreign_table alter rename_name type char(8);
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
select table_name, column_name, data_type
from information_schema.columns
where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id'
@ -158,7 +158,7 @@ order by table_name;
renamed_foreign_table_610003 | rename_name | character
(4 rows)
\c - - - :master_port
\c - - :master_host :master_port
SELECT master_get_table_ddl_events('renamed_foreign_table');
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
master_get_table_ddl_events
@ -175,7 +175,7 @@ ERROR: local_view is not a regular, foreign or partitioned table
-- clean up
DROP VIEW IF EXISTS local_view;
DROP FOREIGN TABLE IF EXISTS renamed_foreign_table;
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
select table_name, column_name, data_type
from information_schema.columns
where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id'
@ -184,7 +184,7 @@ order by table_name;
---------------------------------------------------------------------
(0 rows)
\c - - - :master_port
\c - - :master_host :master_port
DROP TABLE IF EXISTS simple_table, not_null_table, column_constraint_table,
table_constraint_table, default_value_table, pkey_table,
unique_table, clustered_table, fiddly_table;

View File

@ -20,7 +20,7 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345
(1 row)
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\dt too_long_*
List of relations
Schema | Name | Type | Owner
@ -29,7 +29,7 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345
public | too_long_12345678901234567890123456789012345678_e0119164_225001 | table | postgres
(2 rows)
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
-- Verify that the UDF works and rejects bad arguments.
@ -80,7 +80,7 @@ ALTER TABLE name_lengths ADD EXCLUDE (int_col_1234567890123456789012345678901234
ERROR: cannot create constraint without a name on a distributed table
ALTER TABLE name_lengths ADD CHECK (date_col_12345678901234567890123456789012345678901234567890 > '2014-01-01'::date);
ERROR: cannot create constraint without a name on a distributed table
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC;
Column | Type | Modifiers
---------------------------------------------------------------------
@ -91,7 +91,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_le
col1 | integer | not null
(5 rows)
\c - - - :master_port
\c - - :master_host :master_port
-- Placeholders for unsupported add constraints with EXPLICIT names that are too long
ALTER TABLE name_lengths ADD CONSTRAINT nl_unique_12345678901234567890123456789012345678901234567890 UNIQUE (float_col_12345678901234567890123456789012345678901234567890);
ERROR: cannot create constraint on "name_lengths"
@ -100,14 +100,14 @@ ALTER TABLE name_lengths ADD CONSTRAINT nl_exclude_12345678901234567890123456789
ERROR: cannot create constraint on "name_lengths"
DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE).
ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_12345678901234567890123456789012345678901234567890 CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '2014-01-01'::date);
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC;
Constraint | Definition
---------------------------------------------------------------------
nl_checky_1234567890123456789012345678901234567_b16df46d_225002 | CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '01-01-2014'::date)
(1 row)
\c - - - :master_port
\c - - :master_host :master_port
-- Placeholders for RENAME operations
\set VERBOSITY TERSE
ALTER TABLE name_lengths RENAME TO name_len_12345678901234567890123456789012345678901234567890;
@ -117,7 +117,7 @@ ERROR: renaming constraints belonging to distributed tables is currently unsupp
\set VERBOSITY DEFAULT
-- Verify that CREATE INDEX on already distributed table has proper shard names.
CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_lengths(col2);
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
relname | Column | Type | Definition
@ -126,12 +126,12 @@ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
tmp_idx_123456789012345678901234567890123456789_5e470afa_225002 | col2 | integer | col2
(2 rows)
\c - - - :master_port
\c - - :master_host :master_port
-- Verify that a new index name > 63 characters is auto-truncated
-- by the parser/rewriter before further processing, just as in Postgres.
CREATE INDEX tmp_idx_123456789012345678901234567890123456789012345678901234567890 ON name_lengths(col2);
NOTICE: identifier "tmp_idx_123456789012345678901234567890123456789012345678901234567890" will be truncated to "tmp_idx_1234567890123456789012345678901234567890123456789012345"
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
relname | Column | Type | Definition
@ -142,7 +142,7 @@ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
tmp_idx_123456789012345678901234567890123456789_599636aa_225002 | col2 | integer | col2
(4 rows)
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
-- Verify that distributed tables with too-long names
@ -190,7 +190,7 @@ SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
(1 row)
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\di public.sneaky*225006
List of relations
Schema | Name | Type | Owner | Table
@ -204,7 +204,7 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_n
checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100)
(1 row)
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
DROP TABLE sneaky_name_lengths CASCADE;
@ -221,7 +221,7 @@ SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
(1 row)
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\di unique*225008
List of relations
Schema | Name | Type | Owner | Table
@ -229,7 +229,7 @@ SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
public | unique_1234567890123456789012345678901234567890_a5986f27_225008 | index | postgres | sneaky_name_lengths_225008
(1 row)
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
DROP TABLE sneaky_name_lengths CASCADE;
@ -244,7 +244,7 @@ SELECT create_distributed_table('too_long_12345678901234567890123456789012345678
(1 row)
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\dt *225000000000*
List of relations
Schema | Name | Type | Owner
@ -253,7 +253,7 @@ SELECT create_distributed_table('too_long_12345678901234567890123456789012345678
public | too_long_1234567890123456789012345678901_e0119164_2250000000001 | table | postgres
(2 rows)
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE;
@ -276,7 +276,7 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0
"elephant_слонслонслонсло_c8b737c2_2250000000002"
(1 row)
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\dt public.elephant_*
List of relations
Schema | Name | Type | Owner
@ -293,7 +293,7 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0
public | elephant_слонслонслонсло_14d34928_2250000000003 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000003
(2 rows)
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
-- Verify that shard_name UDF supports schemas

View File

@ -50,7 +50,7 @@ CREATE TABLE repartition_udt_other (
-- Connect directly to a worker, create and drop the type, then
-- proceed with type creation as above; thus the OIDs will be different,
-- so the worker's type OID is deliberately out of sync with the coordinator's.
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
-- START type creation
-- ... as well as a function to use as its comparator...
CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean
@ -85,7 +85,7 @@ DEFAULT FOR TYPE test_udt USING HASH AS
OPERATOR 1 = (test_udt, test_udt),
FUNCTION 1 test_udt_hash(test_udt);
-- END type creation
\c - - - :worker_2_port
\c - - :public_worker_2_host :worker_2_port
-- START type creation
-- ... as well as a function to use as its comparator...
CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean
@ -121,7 +121,7 @@ OPERATOR 1 = (test_udt, test_udt),
FUNCTION 1 test_udt_hash(test_udt);
-- END type creation
-- Connect to master
\c - - - :master_port
\c - - :master_host :master_port
-- Distribute and populate the two tables.
SET citus.shard_count TO 3;
SET citus.shard_replication_factor TO 1;

View File

@ -3,7 +3,7 @@
--
SET citus.next_shard_id TO 830000;
-- Create UDF in master and workers
\c - - - :master_port
\c - - :master_host :master_port
DROP FUNCTION IF EXISTS median(double precision[]);
NOTICE: function median(pg_catalog.float8[]) does not exist, skipping
CREATE FUNCTION median(double precision[]) RETURNS double precision
@ -13,7 +13,7 @@ LANGUAGE sql IMMUTABLE AS $_$
ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2)
OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
$_$;
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
DROP FUNCTION IF EXISTS median(double precision[]);
NOTICE: function median(pg_catalog.float8[]) does not exist, skipping
CREATE FUNCTION median(double precision[]) RETURNS double precision
@ -23,7 +23,7 @@ LANGUAGE sql IMMUTABLE AS $_$
ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2)
OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
$_$;
\c - - - :worker_2_port
\c - - :public_worker_2_host :worker_2_port
DROP FUNCTION IF EXISTS median(double precision[]);
NOTICE: function median(pg_catalog.float8[]) does not exist, skipping
CREATE FUNCTION median(double precision[]) RETURNS double precision
@ -34,7 +34,7 @@ LANGUAGE sql IMMUTABLE AS $_$
OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
$_$;
-- Run query on master
\c - - - :master_port
\c - - :master_host :master_port
SET citus.task_executor_type TO 'task-tracker';
SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*)
FROM lineitem GROUP BY l_partkey) AS a

View File

@ -80,45 +80,6 @@ BEGIN
RETURN true;
END;
$func$;
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';
-- set sync intervals to less than 15s so wait_until_metadata_sync never times out
ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
-- Verifies that pg_dist_node and pg_dist_placement on the given worker match the ones on the coordinator
CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
RETURNS BOOLEAN
LANGUAGE sql
AS $$
SELECT wait_until_metadata_sync();
WITH dist_node_summary AS (
SELECT 'SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport, isactive) ORDER BY nodeid) FROM pg_dist_node' as query
), dist_node_check AS (
SELECT count(distinct result) = 1 AS matches
FROM dist_node_summary CROSS JOIN LATERAL
master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
ARRAY[dist_node_summary.query, dist_node_summary.query],
false)
), dist_placement_summary AS (
SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement' AS query
), dist_placement_check AS (
SELECT count(distinct result) = 1 AS matches
FROM dist_placement_summary CROSS JOIN LATERAL
master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
ARRAY[dist_placement_summary.query, dist_placement_summary.query],
false)
)
SELECT dist_node_check.matches AND dist_placement_check.matches
FROM dist_node_check CROSS JOIN dist_placement_check
$$;
--
-- Procedure for creating shards for range partitioned distributed table.
--
@ -135,15 +96,3 @@ BEGIN
END LOOP;
END;
$$ LANGUAGE plpgsql;
-- partition_task_list_results tests the internal PartitionTasklistResults function
CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix text,
query text,
target_table regclass,
binaryFormat bool DEFAULT true)
RETURNS TABLE(resultId text,
nodeId int,
rowCount bigint,
targetShardId bigint,
targetShardIndex int)
LANGUAGE C STRICT VOLATILE
AS 'citus', $$partition_task_list_results$$;

View File

@ -0,0 +1,51 @@
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';
-- set sync intervals to less than 15s so wait_until_metadata_sync never times out
ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
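-- A typical call site simply blocks until the workers catch up (a minimal
-- sketch; the explicit timeout is an illustrative override of the 15000 ms
-- default):
SELECT wait_until_metadata_sync(30000);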
-- Verifies that pg_dist_node and pg_dist_placement on the given worker match the ones on the coordinator
CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
RETURNS BOOLEAN
LANGUAGE sql
AS $$
SELECT wait_until_metadata_sync();
WITH dist_node_summary AS (
SELECT 'SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport, isactive) ORDER BY nodeid) FROM pg_dist_node' as query
), dist_node_check AS (
SELECT count(distinct result) = 1 AS matches
FROM dist_node_summary CROSS JOIN LATERAL
master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
ARRAY[dist_node_summary.query, dist_node_summary.query],
false)
), dist_placement_summary AS (
SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement' AS query
), dist_placement_check AS (
SELECT count(distinct result) = 1 AS matches
FROM dist_placement_summary CROSS JOIN LATERAL
master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
ARRAY[dist_placement_summary.query, dist_placement_summary.query],
false)
)
SELECT dist_node_check.matches AND dist_placement_check.matches
FROM dist_node_check CROSS JOIN dist_placement_check
$$;
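-- A hypothetical invocation, asserting that the first worker agrees with the
-- coordinator (57637 is the conventional worker_1 port in this test suite):
SELECT verify_metadata('localhost', 57637);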
-- partition_task_list_results tests the internal PartitionTasklistResults function
CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix text,
query text,
target_table regclass,
binaryFormat bool DEFAULT true)
RETURNS TABLE(resultId text,
nodeId int,
rowCount bigint,
targetShardId bigint,
targetShardIndex int)
LANGUAGE C STRICT VOLATILE
AS 'citus', $$partition_task_list_results$$;
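-- A call would look roughly like the sketch below; the result prefix and the
-- table names are hypothetical and not part of any test above:
SELECT * FROM pg_catalog.partition_task_list_results('prefix_1',
    $$ SELECT * FROM source_table $$, 'target_table'::regclass);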

View File

@ -198,7 +198,7 @@ SELECT master_create_worker_shards('second_dustbunnies', 1, 2);
(1 row)
-- run VACUUM and ANALYZE against the table on the master
\c - - - :master_port
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
VACUUM dustbunnies;
NOTICE: issuing VACUUM public.dustbunnies_990002
@ -229,13 +229,13 @@ NOTICE: issuing VACUUM (ANALYZE) public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE) public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
-- disable auto-VACUUM for next test
ALTER TABLE dustbunnies_990002 SET (autovacuum_enabled = false);
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid='dustbunnies_990002'::regclass
\gset
-- send a VACUUM FREEZE after adding a new row
\c - - - :master_port
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
INSERT INTO dustbunnies VALUES (5, 'peter');
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
@ -256,7 +256,7 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FREEZE) public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- verify that relfrozenxid increased
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid='dustbunnies_990002'::regclass;
frozen_performed
@ -275,7 +275,7 @@ WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
(3 rows)
-- add NULL values, then perform column-specific ANALYZE
\c - - - :master_port
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
INSERT INTO dustbunnies VALUES (6, NULL, NULL);
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
@ -304,7 +304,7 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- verify that name's NULL ratio is updated but age's is not
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
attname | null_frac
@ -314,7 +314,7 @@ WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
name | 0.166667
(3 rows)
\c - - - :master_port
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
-- verify warning for unqualified VACUUM
VACUUM;

View File

@ -3,5 +3,5 @@ test: failure_test_helpers
# this should only be run by pg_regress_multi, you don't need it
test: failure_setup
test: multi_test_helpers
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views

View File

@ -3,7 +3,7 @@ test: failure_test_helpers
# this should only be run by pg_regress_multi, you don't need it
test: failure_setup
test: multi_test_helpers
test: multi_test_helpers multi_test_helpers_superuser
test: failure_parallel_connection
test: failure_replicated_partitions
test: multi_test_catalog_views

View File

@ -54,7 +54,7 @@ CREATE TABLE multi_append_table_to_shard_stage
text TEXT not null
);
COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|';
\COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|';
-- Check that we error out if we try to append data to a hash partitioned table.
SELECT master_create_empty_shard('multi_append_table_to_shard_right_reference_hash');
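-- The COPY -> \COPY switch above matters once the coordinator is remote:
-- server-side COPY resolves the file path on the server's filesystem, while
-- psql's \copy reads the client-side file and streams it over the connection.
-- A generic sketch of the distinction (both paths are hypothetical):
COPY multi_append_table_to_shard_stage FROM '/path/on/the/server/records.data' with delimiter '|';
\COPY multi_append_table_to_shard_stage FROM '/path/on/the/client/records.data' with delimiter '|'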

View File

@ -15,8 +15,8 @@ SELECT create_distributed_table('users_table', 'user_id');
CREATE TABLE events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint);
SELECT create_distributed_table('events_table', 'user_id');
COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
SET citus.shard_count = 96;
CREATE SCHEMA subquery_and_ctes;
@ -28,8 +28,8 @@ SELECT create_distributed_table('users_table', 'user_id');
CREATE TABLE events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint);
SELECT create_distributed_table('events_table', 'user_id');
COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
SET citus.shard_count TO DEFAULT;
SET search_path TO DEFAULT;
@ -68,8 +68,8 @@ INSERT INTO users_ref_test_table VALUES(4,'User_4',48);
INSERT INTO users_ref_test_table VALUES(5,'User_5',49);
INSERT INTO users_ref_test_table VALUES(6,'User_6',50);
COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
-- create indexes for the tables
CREATE INDEX is_index1 ON users_table(user_id);
@ -86,118 +86,6 @@ CREATE TYPE user_composite_type AS
user_id BIGINT
);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION cmp_user_composite_type_function(user_composite_type, user_composite_type) RETURNS int
LANGUAGE 'internal'
AS 'btrecordcmp'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION gt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_gt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION ge_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_ge'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION equal_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_eq'
IMMUTABLE;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION lt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_lt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION le_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_lt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR > (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = gt_user_composite_type_function
);
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR >= (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = ge_user_composite_type_function
);
$f$);
-- ... use that function to create a custom equality operator...
SELECT run_command_on_master_and_workers($f$
-- ... use that function to create a custom equality operator...
CREATE OPERATOR = (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = equal_user_composite_type_function,
commutator = =,
RESTRICT = eqsel,
JOIN = eqjoinsel,
merges,
hashes
);
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR <= (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = le_user_composite_type_function
);
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR < (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = lt_user_composite_type_function
);
$f$);
-- ... and create a custom operator family for hash indexes...
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR FAMILY cats_2_op_fam USING hash;
$f$);
-- ... create a test HASH function. Though it is a poor hash function,
-- it is acceptable for our tests
@ -210,184 +98,7 @@ SELECT run_command_on_master_and_workers($f$
RETURNS NULL ON NULL INPUT;
$f$);
-- We need to define two different operator classes for the composite types
-- One uses BTREE, the other uses HASH
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR CLASS cats_2_op_fam_clas3
DEFAULT FOR TYPE user_composite_type USING BTREE AS
OPERATOR 1 <= (user_composite_type, user_composite_type),
OPERATOR 2 < (user_composite_type, user_composite_type),
OPERATOR 3 = (user_composite_type, user_composite_type),
OPERATOR 4 >= (user_composite_type, user_composite_type),
OPERATOR 5 > (user_composite_type, user_composite_type),
FUNCTION 1 cmp_user_composite_type_function(user_composite_type, user_composite_type);
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR CLASS cats_2_op_fam_class
DEFAULT FOR TYPE user_composite_type USING HASH AS
OPERATOR 1 = (user_composite_type, user_composite_type),
FUNCTION 1 test_composite_type_hash(user_composite_type);
$f$);
CREATE TABLE events (
composite_id user_composite_type,
event_id bigint,
event_type character varying(255),
event_time bigint
);
SELECT master_create_distributed_table('events', 'composite_id', 'range');
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY events FROM STDIN WITH CSV
"(1,1001)",20001,click,1472807012
"(1,1001)",20002,submit,1472807015
"(1,1001)",20003,pay,1472807020
"(1,1002)",20010,click,1472807022
"(1,1002)",20011,click,1472807023
"(1,1002)",20012,submit,1472807025
"(1,1002)",20013,pay,1472807030
"(1,1003)",20014,click,1472807032
"(1,1003)",20015,click,1472807033
"(1,1003)",20016,click,1472807034
"(1,1003)",20017,submit,1472807035
\.
CREATE TABLE users (
composite_id user_composite_type,
lastseen bigint
);
SELECT master_create_distributed_table('users', 'composite_id', 'range');
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY users FROM STDIN WITH CSV
"(1,1001)",1472807115
"(1,1002)",1472807215
"(1,1003)",1472807315
\.
-- Create tables for subquery tests
CREATE TABLE lineitem_subquery (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range');
CREATE TABLE orders_subquery (
o_orderkey bigint not null,
o_custkey integer not null,
o_orderstatus char(1) not null,
o_totalprice decimal(15,2) not null,
o_orderdate date not null,
o_orderpriority char(15) not null,
o_clerk char(15) not null,
o_shippriority integer not null,
o_comment varchar(79) not null,
PRIMARY KEY(o_orderkey) );
SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range');
SET citus.enable_router_execution TO 'false';
-- Check that we don't crash if there are not any shards.
SELECT
avg(unit_price)
FROM
(SELECT
l_orderkey,
avg(o_totalprice) AS unit_price
FROM
lineitem_subquery,
orders_subquery
WHERE
l_orderkey = o_orderkey
GROUP BY
l_orderkey) AS unit_prices;
-- Load data into tables.
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "1MB";
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
SET citus.next_shard_id TO 1400297;
CREATE TABLE events_reference_table (like events_table including all);
SELECT create_reference_table('events_reference_table');

View File

@ -0,0 +1,291 @@
SET citus.next_shard_id TO 1400285;
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION cmp_user_composite_type_function(user_composite_type, user_composite_type) RETURNS int
LANGUAGE 'internal'
AS 'btrecordcmp'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION gt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_gt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION ge_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_ge'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION equal_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_eq'
IMMUTABLE;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION lt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_lt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION le_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_lt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR > (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = gt_user_composite_type_function
);
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR >= (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = ge_user_composite_type_function
);
$f$);
-- ... use that function to create a custom equality operator...
SELECT run_command_on_master_and_workers($f$
-- ... use that function to create a custom equality operator...
CREATE OPERATOR = (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = equal_user_composite_type_function,
commutator = =,
RESTRICT = eqsel,
JOIN = eqjoinsel,
merges,
hashes
);
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR <= (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = le_user_composite_type_function
);
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR < (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = lt_user_composite_type_function
);
$f$);
-- ... and create a custom operator family for hash indexes...
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR FAMILY cats_2_op_fam USING hash;
$f$);
-- We need to define two different operator classes for the composite types
-- One uses BTREE, the other uses HASH
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR CLASS cats_2_op_fam_clas3
DEFAULT FOR TYPE user_composite_type USING BTREE AS
OPERATOR 1 <= (user_composite_type, user_composite_type),
OPERATOR 2 < (user_composite_type, user_composite_type),
OPERATOR 3 = (user_composite_type, user_composite_type),
OPERATOR 4 >= (user_composite_type, user_composite_type),
OPERATOR 5 > (user_composite_type, user_composite_type),
FUNCTION 1 cmp_user_composite_type_function(user_composite_type, user_composite_type);
$f$);
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR CLASS cats_2_op_fam_class
DEFAULT FOR TYPE user_composite_type USING HASH AS
OPERATOR 1 = (user_composite_type, user_composite_type),
FUNCTION 1 test_composite_type_hash(user_composite_type);
$f$);
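-- With the BTREE operator class in place, plain comparisons on the composite
-- type resolve through the operators defined above, which is what range
-- distribution on composite_id relies on. A quick sanity check (the literals
-- echo the sample data loaded below):
SELECT '(1,1001)'::user_composite_type < '(1,1002)'::user_composite_type;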
CREATE TABLE events (
composite_id user_composite_type,
event_id bigint,
event_type character varying(255),
event_time bigint
);
SELECT master_create_distributed_table('events', 'composite_id', 'range');
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY events FROM STDIN WITH CSV
"(1,1001)",20001,click,1472807012
"(1,1001)",20002,submit,1472807015
"(1,1001)",20003,pay,1472807020
"(1,1002)",20010,click,1472807022
"(1,1002)",20011,click,1472807023
"(1,1002)",20012,submit,1472807025
"(1,1002)",20013,pay,1472807030
"(1,1003)",20014,click,1472807032
"(1,1003)",20015,click,1472807033
"(1,1003)",20016,click,1472807034
"(1,1003)",20017,submit,1472807035
\.
CREATE TABLE users (
composite_id user_composite_type,
lastseen bigint
);
SELECT master_create_distributed_table('users', 'composite_id', 'range');
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY users FROM STDIN WITH CSV
"(1,1001)",1472807115
"(1,1002)",1472807215
"(1,1003)",1472807315
\.
-- Create tables for subquery tests
CREATE TABLE lineitem_subquery (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range');
CREATE TABLE orders_subquery (
o_orderkey bigint not null,
o_custkey integer not null,
o_orderstatus char(1) not null,
o_totalprice decimal(15,2) not null,
o_orderdate date not null,
o_orderpriority char(15) not null,
o_clerk char(15) not null,
o_shippriority integer not null,
o_comment varchar(79) not null,
PRIMARY KEY(o_orderkey) );
SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range');
SET citus.enable_router_execution TO 'false';
-- Check that we don't crash if there are not any shards.
SELECT
avg(unit_price)
FROM
(SELECT
l_orderkey,
avg(o_totalprice) AS unit_price
FROM
lineitem_subquery,
orders_subquery
WHERE
l_orderkey = o_orderkey
GROUP BY
l_orderkey) AS unit_prices;
-- Load data into tables.
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "1MB";
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

View File

@ -24,7 +24,3 @@ SET citus.next_shard_id TO 290000;
\copy part_append FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
\copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

View File

@ -0,0 +1,4 @@
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
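-- These are .source files: pg_regress rewrites @abs_srcdir@ to the absolute
-- path of the regress directory when it generates the runnable .sql file, so
-- the executed command ends up looking like this (the path is a made-up
-- example):
\copy lineitem_hash_part FROM '/home/user/citus/src/test/regress/data/lineitem.1.data' with delimiter '|'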

View File

@ -1,3 +1,3 @@
test: multi_cluster_management
test: multi_test_helpers
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views

View File

@ -14,7 +14,7 @@
# Tests around schema changes; these are run first, so there are no preexisting objects.
# ---
test: multi_extension
test: multi_test_helpers
test: multi_test_helpers multi_test_helpers_superuser
test: multi_mx_node_metadata
test: multi_cluster_management
test: multi_mx_function_table_reference

View File

@ -25,7 +25,7 @@ test: multi_cluster_management
test: alter_role_propagation
test: propagate_extension_commands
test: escape_extension_name
test: multi_test_helpers
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
test: multi_table_ddl
test: multi_name_lengths
@ -40,8 +40,9 @@ test: multi_read_from_secondaries
# uploading data to it.
# ----------
test: multi_create_table
test: multi_create_table_constraints multi_master_protocol multi_load_data multi_behavioral_analytics_create_table
test: multi_behavioral_analytics_basics multi_behavioral_analytics_single_shard_queries multi_insert_select_non_pushable_queries multi_insert_select
test: multi_create_table_superuser
test: multi_create_table_constraints multi_master_protocol multi_load_data multi_load_data_superuser multi_behavioral_analytics_create_table
test: multi_behavioral_analytics_basics multi_behavioral_analytics_single_shard_queries multi_insert_select_non_pushable_queries multi_insert_select multi_behavioral_analytics_create_table_superuser
test: multi_shard_update_delete recursive_dml_with_different_planners_executors
test: insert_select_repartition window_functions dml_recursive multi_insert_select_window
test: multi_insert_select_conflict

View File

@ -0,0 +1,262 @@
# ----------
# $Id$
#
# Regression tests that exercise distributed planning/execution functionality.
#
# All new regression tests are expected to be run by this schedule. Tests that
# do not set a specific task executor type should also be added to
# multi_task_tracker_extra_schedule.
#
# Note that we use variant comparison files to test version dependent regression
# test results. For more information:
# http://www.postgresql.org/docs/current/static/regress-variant.html
# ----------
# ---
# Tests around schema changes; these are run first, so there are no preexisting objects.
#
# propagate_extension_commands lies just after multi_cluster_management: since we do
# remove / add node operations, we do not want any preexisting objects before
# propagate_extension_commands
# ---
test: multi_test_helpers
test: multi_test_catalog_views
test: multi_name_resolution
# ----------
# The following distributed tests depend on creating a partitioned table and
# uploading data to it.
# ----------
test: multi_create_table
test: multi_master_protocol multi_load_data multi_behavioral_analytics_create_table
test: recursive_dml_with_different_planners_executors
test: window_functions multi_insert_select_window
# following should not run in parallel because it relies on connection counts to workers
test: insert_select_connection_leak
# ---------
# at the end of the regression tests regarding recursively planned modifications
# ensure that we don't leak any intermediate results
# This test should not run in parallel with any other tests
# ---------
# ----------
# Tests for partitioning support
# ----------
# ----------
# Tests for recursive subquery planning
# ----------
test: subquery_basics subquery_local_tables subquery_executors set_operations set_operation_and_local_tables
test: subquery_partitioning subquery_complex_target_list subqueries_not_supported
test: non_colocated_join_order
test: subquery_prepared_statements pg12 cte_inline
# ----------
# Miscellaneous tests to check our query planning behavior
# ----------
test: multi_distributed_transaction_id
test: hyperscale_tutorial
test: multi_basic_queries multi_complex_expressions multi_subquery_complex_queries multi_subquery_behavioral_analytics
test: multi_subquery_complex_reference_clause multi_subquery_window_functions multi_sql_function
test: multi_function_in_join row_types
test: multi_subquery_in_where_reference_clause full_join adaptive_executor propagate_set_commands
test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc
test: multi_limit_clause_approximate multi_single_relation_subquery
test: multi_select_for_update
test: multi_average_expression multi_working_columns multi_having_pushdown
test: multi_array_agg
test: multi_jsonb_agg multi_jsonb_object_agg multi_json_agg multi_json_object_agg bool_agg ch_bench_having ch_bench_subquery_repartition chbenchmark_all_queries expression_reference_join
test: multi_agg_type_conversion multi_count_type_conversion
test: multi_cross_shard
test: multi_dropped_column_aliases foreign_key_restriction_enforcement
test: multi_binary_master_copy_format
# ----------
# Parallel TPC-H tests to check our distributed execution behavior
# ----------
test: multi_tpch_query1 multi_tpch_query3 multi_tpch_query6 multi_tpch_query10
test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19
test: multi_tpch_query7 multi_tpch_query7_nested
# ----------
# Parallel tests to check our join order planning logic. Note that we load data
# below; and therefore these tests should come after the execution tests.
# ----------
test: multi_join_order_tpch_small multi_join_order_additional
test: multi_join_order_tpch_repartition
# ----------
# Tests for repartition join planning and execution. Be careful when creating
# new shards before these tests, as they expect specific shard identifiers in
# the output.
# ----------
test: multi_repartition_join_ref
test: adaptive_executor_repartition
# ---------
# Tests that modify data should run sequentially
# ---------
test: with_prepare
# ---------
# Tests for recursive planning.
# ---------
test: with_nested with_where
test: cte_prepared_modify cte_nested_modification
test: with_executors with_partitioning with_dml
# ----------
# Tests to check our large record loading and shard deletion behavior
# ----------
test: multi_load_large_records
test: multi_master_delete_protocol
test: multi_shard_modify
# ----------
# Tests around DDL statements run on distributed tables
# ----------
# ----------
# multi_create_schema tests creation, loading, and querying of a table in a new
# schema (namespace).
# ----------
test: multi_create_schema
# ----------
# Tests to check if we inform the user about potential caveats of creating new
# databases, schemas, roles, and authentication information.
# ----------
# ----------
# Tests to check the sequential and parallel executions of DDL and modification
# commands
# Should not be executed in parallel with other tests
# ----------
# ---------
# loads data to create shards in a way that forces
# shard caching.
# ---------
# ---------
# multi_outer_join loads data to create shards to test outer join mappings
# ---------
test: multi_outer_join
# ---
# Tests covering mostly modification queries and required preliminary
# functionality related to metadata, shard creation, shard pruning and
# "hacky" copy script for hash partitioned tables.
# Note that the order of the following tests is important. multi_complex_count_distinct
# is independent from the rest of the group; it is added to increase parallelism.
# ---
test: multi_complex_count_distinct
test: multi_upsert multi_simple_queries
test: foreign_key_to_reference_table validate_constraint
# ---------
# creates hash and range-partitioned tables and performs COPY
# creates hash partitioned tables.
# ---------
test: fast_path_router_modify
test: null_parameters
# ----------
# loads more lineitem data using high shard identifiers
# ----------
# ----------
# tests various size commands on distributed tables
# ----------
# ----------
# multi_drop_extension makes sure we can safely drop and recreate the extension
# ----------
# ----------
# tests the propagation of mx-related metadata changes to metadata workers
# multi_unsupported_worker_operations tests that unsupported operations error out on metadata workers
# ----------
# ----------
# tests if the GRANT ... ON SCHEMA queries are propagated correctly
# makes sure we can work with tables in schemas other than public with no problem
# ----------
# ----------
# multi_function_evaluation tests edge-cases in master-side function pre-evaluation
# ----------
test: multi_function_evaluation
# ----------
# tests truncate functionality for distributed tables
# ----------
# ----------
# tests utility functions written for co-location feature & internal API
# tests master_copy_shard_placement with colocated tables.
# ----------
# ----------
# tests utility functions written for citus tools
# ----------
# ----------
# multi_foreign_key tests foreign key push down on distributed tables
# ----------
test: multi_foreign_key
# ----------
# tests for upgrade_reference_table UDF
# tests replicating reference tables to new nodes after we add new nodes
# tests metadata changes after master_remove_node
# ----------
# --------
# Replicating reference tables to coordinator. Add coordinator to pg_dist_node
# and rerun some of the tests.
# --------
test: foreign_key_to_reference_table
# ----------
# tests for dropping shards using connection API
# ----------
# ----------
# tests simple combinations of permission access and queries
# ----------
# ---------
# tests for an obscure crash citus used to exhibit when shardids
# changed the table they belonged to during a session
# --------
# ---------
# multi_task_string_size tests task string size checks
# ---------
test: multi_task_string_size
# ---------
# connection encryption tests
# ---------
# ---------
# object distribution tests
# ---------
test: distributed_types_xact_add_enum_value
# ---------
# deparsing logic tests
# ---------
# ---------
# test that no tests leaked intermediate results. This should always be last
# Causes random test failures so commented out for now
# ---------
# test:

View File

@ -0,0 +1,213 @@
# ----------
# $Id$
#
# Regression tests that exercise distributed planning/execution functionality.
#
# All new regression tests are expected to be run by this schedule. Tests that
# do not set a specific task executor type should also be added to
# multi_task_tracker_extra_schedule.
#
# Note that we use variant comparison files to test version dependent regression
# test results. For more information:
# http://www.postgresql.org/docs/current/static/regress-variant.html
# ----------
# ---
# Tests around schema changes; these are run first, so there are no preexisting objects.
#
# propagate_extension_commands lies just after multi_cluster_management: since we do
# remove / add node operations, we do not want any preexisting objects before
# propagate_extension_commands
# ---
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
test: multi_name_lengths
test: multi_name_resolution
test: multi_metadata_access
# ----------
# The following distributed tests depend on creating a partitioned table and
# uploading data to it.
# ----------
test: multi_create_table
test: multi_create_table_superuser
test: multi_create_table_constraints multi_master_protocol multi_load_data multi_load_data_superuser multi_behavioral_analytics_create_table
test: recursive_dml_with_different_planners_executors multi_behavioral_analytics_create_table_superuser
test: insert_select_repartition dml_recursive multi_insert_select_window
test: multi_insert_select_conflict
# following should not run in parallel because it relies on connection counts to workers
test: insert_select_connection_leak
# ---------
# at the end of the regression tests regarding recursively planned modifications
# ensure that we don't leak any intermediate results
# This test should not run in parallel with any other tests
# ---------
test: ensure_no_intermediate_data_leak
# ----------
# Tests for partitioning support
# ----------
test: multi_partitioning_utils
# ----------
# Tests for recursive subquery planning
# ----------
test: subquery_local_tables subquery_executors subquery_and_cte set_operations set_operation_and_local_tables
test: subqueries_deep subquery_view subquery_partitioning subqueries_not_supported subquery_in_where
test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order
test: subquery_prepared_statements pg12 cte_inline
# ----------
# Miscellaneous tests to check our query planning behavior
# ----------
test: multi_deparse_shard_query multi_distributed_transaction_id limit_intermediate_size
test: hyperscale_tutorial
test: multi_basic_queries multi_subquery multi_subquery_complex_queries multi_subquery_behavioral_analytics
test: multi_subquery_complex_reference_clause multi_subquery_window_functions multi_sql_function
test: multi_function_in_join row_types materialized_view
test: multi_subquery_in_where_reference_clause full_join adaptive_executor propagate_set_commands
test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc
test: multi_agg_distinct multi_limit_clause_approximate multi_outer_join_reference
test: multi_select_for_update relation_access_tracking
test: multi_working_columns multi_having_pushdown
test: bool_agg ch_bench_having ch_bench_subquery_repartition chbenchmark_all_queries expression_reference_join
test: multi_agg_type_conversion multi_count_type_conversion
test: multi_partition_pruning
test: multi_join_pruning multi_hash_pruning intermediate_result_pruning
test: multi_query_directory_cleanup
test: multi_cross_shard
test: multi_dropped_column_aliases foreign_key_restriction_enforcement
test: multi_binary_master_copy_format
# ----------
# Parallel TPC-H tests to check our distributed execution behavior
# ----------
test: multi_tpch_query3 multi_tpch_query6 multi_tpch_query10
test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19
test: multi_tpch_query7 multi_tpch_query7_nested
# ----------
# Parallel tests to check our join order planning logic. Note that we load data
# below, and therefore these tests should come after the execution tests.
# ----------
test: multi_join_order_tpch_small multi_join_order_additional
test: multi_load_more_data
test: multi_join_order_tpch_repartition
# ----------
# Tests for repartition join planning and execution. Be careful when creating
# new shards before these tests, as they expect specific shard identifiers in
# the output.
# ----------
test: multi_repartition_join_planning multi_repartition_join_pruning multi_repartition_join_task_assignment multi_repartition_join_ref
test: adaptive_executor_repartition
# ---------
# Tests that modify data should run sequentially
# ---------
# ---------
# Tests for recursive planning.
# ---------
test: with_where
test: cte_prepared_modify cte_nested_modification
test: ensure_no_intermediate_data_leak
test: with_executors with_partitioning with_dml
# ----------
# Tests to check our large record loading and shard deletion behavior
# ----------
test: multi_load_large_records
test: multi_master_delete_protocol
test: multi_shard_modify
# ----------
# Tests around DDL statements run on distributed tables
# ----------
test: multi_alter_table_add_constraints
# ----------
# multi_create_schema tests creation, loading, and querying of a table in a new
# schema (namespace).
# ----------
test: multi_create_schema
# ----------
# Tests to check the sequential and parallel executions of DDL and modification
# commands
# Should not be executed in parallel with other tests
# ----------
test: sequential_modifications
# ---------
# multi_outer_join loads data to create shards to test outer join mappings
# ---------
test: multi_outer_join
# ---
# Tests covering mostly modification queries and required preliminary
# functionality related to metadata, shard creation, shard pruning and
# "hacky" copy script for hash partitioned tables.
# Note that the order of the following tests is important. multi_complex_count_distinct
# is independent of the rest of the group; it is added to increase parallelism.
# ---
test: multi_create_fdw
test: multi_generate_ddl_commands multi_create_shards multi_prune_shard_list
test: multi_upsert multi_simple_queries multi_data_types
test: multi_utilities foreign_key_to_reference_table validate_constraint
test: multi_repartition_udt multi_repartitioned_subquery_udf
# ---------
# multi_copy creates hash and range-partitioned tables and performs COPY
# multi_router_planner creates hash partitioned tables.
# ---------
test: fast_path_router_modify
test: null_parameters
# ----------
# multi_truncate tests truncate functionality for distributed tables
# ----------
test: multi_truncate
# ----------
# multi_foreign_key tests foreign key push down on distributed tables
# ----------
test: multi_foreign_key multi_foreign_key_relation_graph
# --------
# Replicating reference tables to coordinator. Add coordinator to pg_dist_node
# and rerun some of the tests.
# --------
test: foreign_key_to_reference_table
# ---------
# multi_cache_invalidation tests for an obscure crash citus used to exhibit when shardids
# changed the table they belonged to during a session
# --------
test: multi_cache_invalidation
# ---------
# multi_task_string_size tests task string size checks
# ---------
test: multi_task_string_size
# ---------
# object distribution tests
# ---------
test: distributed_types_conflict disable_object_propagation distributed_types_xact_add_enum_value
# ---------
# deparsing logic tests
# ---------
test: multi_deparse_function
# ---------
# test that no tests leaked intermediate results. This should always be last
# Causes random test failures so commented out for now
# ---------
# test: ensure_no_intermediate_data_leak

View File

@ -16,7 +16,7 @@
test: multi_extension
test: multi_cluster_management
test: multi_table_ddl
test: multi_test_helpers
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
# ----------
@ -24,8 +24,9 @@ test: multi_test_catalog_views
# uploading data to it.
# ----------
test: multi_create_table
test: multi_create_table_superuser
test: multi_master_protocol
test: multi_load_data
test: multi_load_data multi_load_data_superuser
# ----------
# Miscellaneous tests to check our query planning behavior

View File

@ -2,7 +2,7 @@
# Only run few basic tests to set up a testing environment
# ----------
test: multi_cluster_management
test: multi_test_helpers
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
# the following test has to be run sequentially

View File

@ -72,7 +72,7 @@ CREATE TABLE multi_append_table_to_shard_stage
number INTEGER not null,
text TEXT not null
);
COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|';
\COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|';
-- Check that we error out if we try to append data to a hash partitioned table.
SELECT master_create_empty_shard('multi_append_table_to_shard_right_reference_hash');
ERROR: relation "multi_append_table_to_shard_right_reference_hash" is a hash partitioned table

View File

@ -21,8 +21,8 @@ SELECT create_distributed_table('events_table', 'user_id');
(1 row)
COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
SET citus.shard_count = 96;
CREATE SCHEMA subquery_and_ctes;
SET search_path TO subquery_and_ctes;
@ -40,8 +40,8 @@ SELECT create_distributed_table('events_table', 'user_id');
(1 row)
COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
SET citus.shard_count TO DEFAULT;
SET search_path TO DEFAULT;
CREATE TABLE users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint);
@ -110,8 +110,8 @@ INSERT INTO users_ref_test_table VALUES(3,'User_3',47);
INSERT INTO users_ref_test_table VALUES(4,'User_4',48);
INSERT INTO users_ref_test_table VALUES(5,'User_5',49);
INSERT INTO users_ref_test_table VALUES(6,'User_6',50);
COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
\COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV;
\COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV;
-- create indexes for
CREATE INDEX is_index1 ON users_table(user_id);
CREATE INDEX is_index2 ON events_table(user_id);
@ -125,165 +125,6 @@ CREATE TYPE user_composite_type AS
tenant_id BIGINT,
user_id BIGINT
);
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION cmp_user_composite_type_function(user_composite_type, user_composite_type) RETURNS int
LANGUAGE 'internal'
AS 'btrecordcmp'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION gt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_gt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION ge_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_ge'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION equal_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_eq'
IMMUTABLE;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION lt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_lt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION le_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_lt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR > (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = gt_user_composite_type_function
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR >= (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = ge_user_composite_type_function
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
-- ... use that function to create a custom equality operator...
SELECT run_command_on_master_and_workers($f$
-- ... use that function to create a custom equality operator...
CREATE OPERATOR = (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = equal_user_composite_type_function,
commutator = =,
RESTRICT = eqsel,
JOIN = eqjoinsel,
merges,
hashes
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR <= (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = le_user_composite_type_function
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR < (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = lt_user_composite_type_function
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
-- ... and create a custom operator family for hash indexes...
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR FAMILY cats_2_op_fam USING hash;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
-- ... create a test HASH function. Though it is a poor hash function,
-- it is acceptable for our tests
SELECT run_command_on_master_and_workers($f$
@ -299,177 +140,7 @@ $f$);
(1 row)
-- We need to define two different operator classes for the composite types
-- One uses BTREE the other uses HASH
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR CLASS cats_2_op_fam_clas3
DEFAULT FOR TYPE user_composite_type USING BTREE AS
OPERATOR 1 <= (user_composite_type, user_composite_type),
OPERATOR 2 < (user_composite_type, user_composite_type),
OPERATOR 3 = (user_composite_type, user_composite_type),
OPERATOR 4 >= (user_composite_type, user_composite_type),
OPERATOR 5 > (user_composite_type, user_composite_type),
FUNCTION 1 cmp_user_composite_type_function(user_composite_type, user_composite_type);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR CLASS cats_2_op_fam_class
DEFAULT FOR TYPE user_composite_type USING HASH AS
OPERATOR 1 = (user_composite_type, user_composite_type),
FUNCTION 1 test_composite_type_hash(user_composite_type);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
CREATE TABLE events (
composite_id user_composite_type,
event_id bigint,
event_type character varying(255),
event_time bigint
);
SELECT master_create_distributed_table('events', 'composite_id', 'range');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY events FROM STDIN WITH CSV
CREATE TABLE users (
composite_id user_composite_type,
lastseen bigint
);
SELECT master_create_distributed_table('users', 'composite_id', 'range');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY users FROM STDIN WITH CSV
-- Create tables for subquery tests
CREATE TABLE lineitem_subquery (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range');
master_create_distributed_table
---------------------------------
(1 row)
CREATE TABLE orders_subquery (
o_orderkey bigint not null,
o_custkey integer not null,
o_orderstatus char(1) not null,
o_totalprice decimal(15,2) not null,
o_orderdate date not null,
o_orderpriority char(15) not null,
o_clerk char(15) not null,
o_shippriority integer not null,
o_comment varchar(79) not null,
PRIMARY KEY(o_orderkey) );
SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range');
master_create_distributed_table
---------------------------------
(1 row)
SET citus.enable_router_execution TO 'false';
-- Check that we don't crash if there are not any shards.
SELECT
avg(unit_price)
FROM
(SELECT
l_orderkey,
avg(o_totalprice) AS unit_price
FROM
lineitem_subquery,
orders_subquery
WHERE
l_orderkey = o_orderkey
GROUP BY
l_orderkey) AS unit_prices;
avg
-----
(1 row)
-- Load data into tables.
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "1MB";
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
SET citus.next_shard_id TO 1400297;
CREATE TABLE events_reference_table (like events_table including all);
SELECT create_reference_table('events_reference_table');
create_reference_table

View File

@ -0,0 +1,331 @@
SET citus.next_shard_id TO 1400285;
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION cmp_user_composite_type_function(user_composite_type, user_composite_type) RETURNS int
LANGUAGE 'internal'
AS 'btrecordcmp'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION gt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_gt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION ge_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_ge'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION equal_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_eq'
IMMUTABLE;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION lt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_lt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE FUNCTION le_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean
LANGUAGE 'internal'
AS 'record_lt'
IMMUTABLE
RETURNS NULL ON NULL INPUT;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR > (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = gt_user_composite_type_function
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR >= (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = ge_user_composite_type_function
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
-- ... use that function to create a custom equality operator...
SELECT run_command_on_master_and_workers($f$
-- ... use that function to create a custom equality operator...
CREATE OPERATOR = (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = equal_user_composite_type_function,
commutator = =,
RESTRICT = eqsel,
JOIN = eqjoinsel,
merges,
hashes
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR <= (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = le_user_composite_type_function
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR < (
LEFTARG = user_composite_type,
RIGHTARG = user_composite_type,
PROCEDURE = lt_user_composite_type_function
);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
-- ... and create a custom operator family for hash indexes...
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR FAMILY cats_2_op_fam USING hash;
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
-- We need to define two different operator classes for the composite types
-- One uses BTREE the other uses HASH
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR CLASS cats_2_op_fam_clas3
DEFAULT FOR TYPE user_composite_type USING BTREE AS
OPERATOR 1 <= (user_composite_type, user_composite_type),
OPERATOR 2 < (user_composite_type, user_composite_type),
OPERATOR 3 = (user_composite_type, user_composite_type),
OPERATOR 4 >= (user_composite_type, user_composite_type),
OPERATOR 5 > (user_composite_type, user_composite_type),
FUNCTION 1 cmp_user_composite_type_function(user_composite_type, user_composite_type);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
SELECT run_command_on_master_and_workers($f$
CREATE OPERATOR CLASS cats_2_op_fam_class
DEFAULT FOR TYPE user_composite_type USING HASH AS
OPERATOR 1 = (user_composite_type, user_composite_type),
FUNCTION 1 test_composite_type_hash(user_composite_type);
$f$);
run_command_on_master_and_workers
-----------------------------------
(1 row)
CREATE TABLE events (
composite_id user_composite_type,
event_id bigint,
event_type character varying(255),
event_time bigint
);
SELECT master_create_distributed_table('events', 'composite_id', 'range');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('events') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY events FROM STDIN WITH CSV
CREATE TABLE users (
composite_id user_composite_type,
lastseen bigint
);
SELECT master_create_distributed_table('users', 'composite_id', 'range');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)'
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('users') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)'
WHERE shardid = :new_shard_id;
\COPY users FROM STDIN WITH CSV
-- Create tables for subquery tests
CREATE TABLE lineitem_subquery (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null,
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range');
master_create_distributed_table
---------------------------------
(1 row)
CREATE TABLE orders_subquery (
o_orderkey bigint not null,
o_custkey integer not null,
o_orderstatus char(1) not null,
o_totalprice decimal(15,2) not null,
o_orderdate date not null,
o_orderpriority char(15) not null,
o_clerk char(15) not null,
o_shippriority integer not null,
o_comment varchar(79) not null,
PRIMARY KEY(o_orderkey) );
SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range');
master_create_distributed_table
---------------------------------
(1 row)
SET citus.enable_router_execution TO 'false';
-- Check that we don't crash if there are not any shards.
SELECT
avg(unit_price)
FROM
(SELECT
l_orderkey,
avg(o_totalprice) AS unit_price
FROM
lineitem_subquery,
orders_subquery
WHERE
l_orderkey = o_orderkey
GROUP BY
l_orderkey) AS unit_prices;
avg
-----
(1 row)
-- Load data into tables.
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986
WHERE shardid = :new_shard_id;
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id
\gset
UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947
WHERE shardid = :new_shard_id;
SET citus.shard_max_size TO "1MB";
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

View File

@ -19,7 +19,3 @@ SET citus.next_shard_id TO 290000;
\copy part_append FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
\copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

View File

@ -0,0 +1,4 @@
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

View File

@ -80,6 +80,9 @@ my $pgCtlTimeout = undef;
my $connectionTimeout = 5000;
my $useMitmproxy = 0;
my $mitmFifoPath = catfile($TMP_CHECKDIR, "mitmproxy.fifo");
my $conninfo = "";
my $publicWorker1Host = "localhost";
my $publicWorker2Host = "localhost";
my $serversAreShutdown = "TRUE";
my $usingWindows = 0;
@ -108,6 +111,9 @@ GetOptions(
'pg_ctl-timeout=s' => \$pgCtlTimeout,
'connection-timeout=s' => \$connectionTimeout,
'mitmproxy' => \$useMitmproxy,
'conninfo=s' => \$conninfo,
'worker-1-public-hostname=s' => \$publicWorker1Host,
'worker-2-public-hostname=s' => \$publicWorker2Host,
'help' => sub { Usage() });
# Update environment to include [DY]LD_LIBRARY_PATH/LIBDIR/etc -
@ -261,6 +267,10 @@ sub revert_replace_postgres
# partial run, even if we're now not using valgrind.
revert_replace_postgres();
my $host = "localhost";
my $user = "postgres";
my $dbname = "postgres";
# n.b. previously this was on port 57640, which caused issues because that's in the
# ephemeral port range; it was sometimes in the TIME_WAIT state, which prevented us
# from binding to it. 9060 is now used because it will never be used for client connections,
@ -270,12 +280,84 @@ my $mitmPort = 9060;
# Set some default configuration options
my $masterPort = 57636;
my $workerCount = 2;
my @workerHosts = ();
my @workerPorts = ();
for (my $workerIndex = 1; $workerIndex <= $workerCount; $workerIndex++) {
if ( $conninfo )
{
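# conninfo is a libpq-style string, e.g. "host=... port=... user=... dbname=...";
# splitting on '=' and on whitespace turns its key/value pairs into a hash so
# the individual settings can be picked out below.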
my %convals = split /=|\s/, $conninfo;
if (exists $convals{user})
{
$user = $convals{user};
}
if (exists $convals{host})
{
$host = $convals{host};
}
if (exists $convals{port})
{
$masterPort = $convals{port};
}
if (exists $convals{dbname})
{
$dbname = $convals{dbname};
}
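# Build normalize_modified.sed: start from the stock normalize.sed, then append
# extra rules that mask cluster-specific values (user, host, dbname, ports) so
# the expected files still match when run against an existing cluster.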
open my $in, '<', "bin/normalize.sed" or die "Cannot open normalize.sed file\n";
open my $out, '>', "bin/normalize_modified.sed" or die "Cannot open normalize_modified.sed file\n";
while ( <$in> )
{
print $out $_;
}
close $in;
print $out "\n";
print $out "s/\\bdbname=regression\\b/dbname=<db>/g\n";
print $out "s/\\bdbname=$dbname\\b/dbname=<db>/g\n";
print $out "s/\\b$user\\b/<user>/g\n";
print $out "s/\\bpostgres\\b/<user>/g\n";
print $out "s/\\blocalhost\\b/<host>/g\n";
print $out "s/\\b$host\\b/<host>/g\n";
print $out "s/\\b576[0-9][0-9]\\b/xxxxx/g\n";
print $out "s/", substr("$masterPort", 0, length("$masterPort")-2), "[0-9][0-9]/xxxxx/g\n";
my $worker1host = `psql "$conninfo" -qtAX -c "SELECT nodename FROM pg_dist_node ORDER BY nodeid LIMIT 1;"`;
my $worker1port = `psql "$conninfo" -qtAX -c "SELECT nodeport FROM pg_dist_node ORDER BY nodeid LIMIT 1;"`;
my $worker2host = `psql "$conninfo" -qtAX -c "SELECT nodename FROM pg_dist_node ORDER BY nodeid OFFSET 1 LIMIT 1;"`;
my $worker2port = `psql "$conninfo" -qtAX -c "SELECT nodeport FROM pg_dist_node ORDER BY nodeid OFFSET 1 LIMIT 1;"`;
$worker1host =~ s/^\s+|\s+$//g;
$worker1port =~ s/^\s+|\s+$//g;
$worker2host =~ s/^\s+|\s+$//g;
$worker2port =~ s/^\s+|\s+$//g;
push(@workerPorts, $worker1port);
push(@workerPorts, $worker2port);
push(@workerHosts, $worker1host);
push(@workerHosts, $worker2host);
my $worker1hostReplaced = $worker1host;
my $worker2hostReplaced = $worker2host;
$worker1hostReplaced =~ s/\./\\\./g;
$worker2hostReplaced =~ s/\./\\\./g;
print $out "s/\\b$worker1hostReplaced\\b/<host>/g\n";
print $out "s/\\b$worker2hostReplaced\\b/<host>/g\n";
}
else
{
for (my $workerIndex = 1; $workerIndex <= $workerCount; $workerIndex++) {
my $workerPort = $masterPort + $workerIndex;
push(@workerPorts, $workerPort);
push(@workerHosts, "localhost");
}
}
my $followerCoordPort = 9070;
@ -285,8 +367,6 @@ for (my $workerIndex = 1; $workerIndex <= $workerCount; $workerIndex++) {
push(@followerWorkerPorts, $workerPort);
}
my $host = "localhost";
my $user = "postgres";
my @pgOptions = ();
# Postgres options set for the tests
@ -396,7 +476,17 @@ for my $option (@userPgOptions)
}
# define functions as signature->definition
%functions = ('fake_fdw_handler()', 'fdw_handler AS \'citus\' LANGUAGE C STRICT;');
%functions = ();
if (!$conninfo)
{
%functions = ('fake_fdw_handler()', 'fdw_handler AS \'citus\' LANGUAGE C STRICT;');
}
else
{
# when running the tests on a cluster these will be created with run_command_on_workers
# so extra single quotes are needed
%functions = ('fake_fdw_handler()', 'fdw_handler AS \'\'citus\'\' LANGUAGE C STRICT;');
}
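# A hedged illustration of the quoting above (command text abbreviated): locally
# we execute   CREATE FUNCTION ... RETURNS fdw_handler AS 'citus' LANGUAGE C STRICT;
# but on an existing cluster the same command travels inside a SQL string literal,
#   SELECT run_command_on_workers('CREATE FUNCTION ... AS ''citus'' LANGUAGE C STRICT;');
# so every embedded single quote has to be escaped by doubling it.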
#define fdws as name->handler name
%fdws = ('fake_fdw', 'fake_fdw_handler');
@ -462,6 +552,14 @@ for my $workeroff (0 .. $#workerPorts)
my $port = $workerPorts[$workeroff];
print $fh "--variable=worker_".($workeroff+1)."_port=$port ";
}
for my $workeroff (0 .. $#workerHosts)
{
my $host = $workerHosts[$workeroff];
print $fh "--variable=worker_".($workeroff+1)."_host=\"$host\" ";
}
print $fh "--variable=master_host=\"$host\" ";
print $fh "--variable=public_worker_1_host=\"$publicWorker1Host\" ";
print $fh "--variable=public_worker_2_host=\"$publicWorker2Host\" ";
for my $workeroff (0 .. $#followerWorkerPorts)
{
my $port = $followerWorkerPorts[$workeroff];
@ -492,51 +590,55 @@ else
}
close $fh;
make_path(catfile($TMP_CHECKDIR, $MASTERDIR, 'log')) or die "Could not create $MASTERDIR directory";
for my $port (@workerPorts)
if (!$conninfo)
{
make_path(catfile($TMP_CHECKDIR, $MASTERDIR, 'log')) or die "Could not create $MASTERDIR directory";
for my $port (@workerPorts)
{
make_path(catfile($TMP_CHECKDIR, "worker.$port", "log"))
or die "Could not create worker directory";
}
}
if ($followercluster)
{
if ($followercluster)
{
make_path(catfile($TMP_CHECKDIR, $MASTER_FOLLOWERDIR, 'log')) or die "Could not create $MASTER_FOLLOWERDIR directory";
for my $port (@followerWorkerPorts)
{
make_path(catfile($TMP_CHECKDIR, "follower.$port", "log"))
or die "Could not create worker directory";
}
}
}
# Create new data directories, copy workers for speed
# --allow-group-access is used to ensure we set permissions on private keys
# correctly
system(catfile("$bindir", "initdb"), ("--nosync", "--allow-group-access", "-U", $user, "--encoding", "UTF8", catfile($TMP_CHECKDIR, $MASTERDIR, "data"))) == 0
# Create new data directories, copy workers for speed
# --allow-group-access is used to ensure we set permissions on private keys
# correctly
system(catfile("$bindir", "initdb"), ("--nosync", "--allow-group-access", "-U", $user, "--encoding", "UTF8", catfile($TMP_CHECKDIR, $MASTERDIR, "data"))) == 0
or die "Could not create $MASTERDIR data directory";
if ($usingWindows)
{
if ($usingWindows)
{
for my $port (@workerPorts)
{
system(catfile("$bindir", "initdb"), ("--nosync", "--allow-group-access", "-U", $user, "--encoding", "UTF8", catfile($TMP_CHECKDIR, "worker.$port", "data"))) == 0
or die "Could not create worker data directory";
}
}
else
{
}
else
{
for my $port (@workerPorts)
{
system("cp", ("-a", catfile($TMP_CHECKDIR, $MASTERDIR, "data"), catfile($TMP_CHECKDIR, "worker.$port", "data"))) == 0
or die "Could not create worker data directory";
}
}
}
# Routine to shutdown servers at failure/exit
sub ShutdownServers()
{
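# When --conninfo is given, the cluster is externally managed and no postmasters
# were started by this script, so there is nothing to stop.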
if ($serversAreShutdown eq "FALSE")
if (!$conninfo && $serversAreShutdown eq "FALSE")
{
system(catfile("$bindir", "pg_ctl"),
('stop', '-w', '-D', catfile($TMP_CHECKDIR, $MASTERDIR, 'data'))) == 0
@ -663,17 +765,19 @@ if ($followercluster)
}
# Start servers
if(system(catfile("$bindir", "pg_ctl"),
if (!$conninfo)
{
if(system(catfile("$bindir", "pg_ctl"),
('start', '-w',
'-o', join(" ", @pgOptions)." -c port=$masterPort $synchronousReplication",
'-D', catfile($TMP_CHECKDIR, $MASTERDIR, 'data'), '-l', catfile($TMP_CHECKDIR, $MASTERDIR, 'log', 'postmaster.log'))) != 0)
{
{
system("tail", ("-n20", catfile($TMP_CHECKDIR, $MASTERDIR, "log", "postmaster.log")));
die "Could not start master server";
}
}
for my $port (@workerPorts)
{
for my $port (@workerPorts)
{
if(system(catfile("$bindir", "pg_ctl"),
('start', '-w',
'-o', join(" ", @pgOptions)." -c port=$port $synchronousReplication",
@ -683,6 +787,7 @@ for my $port (@workerPorts)
system("tail", ("-n20", catfile($TMP_CHECKDIR, "worker.$port", "log", "postmaster.log")));
die "Could not start worker server";
}
}
}
# Setup the follower nodes
@ -730,8 +835,10 @@ if ($followercluster)
# Create database, extensions, types, functions and fdws on the workers,
# pg_regress won't know to create them for us.
###
for my $port (@workerPorts)
if (!$conninfo)
{
for my $port (@workerPorts)
{
system(catfile($bindir, "psql"),
('-X', '-h', $host, '-p', $port, '-U', $user, "-d", "postgres",
'-c', "CREATE DATABASE regression;")) == 0
@ -768,6 +875,40 @@ for my $port (@workerPorts)
'-c', "CREATE SERVER $fdwServer FOREIGN DATA WRAPPER $fdwServers{$fdwServer};")) == 0
or die "Could not create server $fdwServer on worker";
}
}
}
else
{
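# On an existing cluster we cannot assume direct psql access to each worker, so
# the same objects are created by routing every command through the coordinator
# with run_command_on_workers().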
for my $extension (@extensions)
{
system(catfile($bindir, "psql"),
('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", $dbname,
'-c', "SELECT run_command_on_workers('CREATE EXTENSION IF NOT EXISTS $extension;');")) == 0
or die "Could not create extension on worker";
}
foreach my $function (keys %functions)
{
system(catfile($bindir, "psql"),
('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", $dbname,
'-c', "SELECT run_command_on_workers('CREATE FUNCTION $function RETURNS $functions{$function};');")) == 0
or die "Could not create FUNCTION $function on worker";
}
foreach my $fdw (keys %fdws)
{
system(catfile($bindir, "psql"),
('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", $dbname,
'-c', "SELECT run_command_on_workers('CREATE FOREIGN DATA WRAPPER $fdw HANDLER $fdws{$fdw};');")) == 0
or die "Could not create foreign data wrapper $fdw on worker";
}
foreach my $fdwServer (keys %fdwServers)
{
system(catfile($bindir, "psql"),
('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", $dbname,
'-c', "SELECT run_command_on_workers('CREATE SERVER $fdwServer FOREIGN DATA WRAPPER $fdwServers{$fdwServer};');")) == 0
or die "Could not create server $fdwServer on worker";
}
}
# Prepare pg_regress arguments
@ -820,6 +961,11 @@ elsif ($isolationtester)
}
else
{
if ($conninfo)
{
push(@arguments, "--dbname=$dbname");
push(@arguments, "--use-existing");
}
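# --use-existing makes pg_regress skip setting up an installation and creating
# fresh databases, running the schedule against the already-running cluster
# addressed by --dbname and the host/port settings above.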
$exitcode = system("$plainRegress", @arguments);
}

View File

@ -5,13 +5,13 @@ SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$);
-- create a type on a worker that should not cause data loss once overwritten with a type
-- from the coordinator
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SET citus.enable_ddl_propagation TO off;
SET search_path TO type_conflict;
CREATE TYPE my_precious_type AS (secret text, should bool);
CREATE TABLE local_table (a int, b my_precious_type);
INSERT INTO local_table VALUES (42, ('always bring a towel', true)::my_precious_type);
\c - - - :master_port
\c - - :master_host :master_port
SET search_path TO type_conflict;
-- overwrite the type on the worker from the coordinator. The type should be overwritten
@ -19,7 +19,7 @@ SET search_path TO type_conflict;
CREATE TYPE my_precious_type AS (scatterd_secret text);
-- verify the data is retained
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SET search_path TO type_conflict;
-- show fields for table
SELECT pg_class.relname,
@ -34,7 +34,7 @@ ORDER BY attnum;
SELECT * FROM local_table;
\c - - - :master_port
\c - - :master_host :master_port
SET search_path TO type_conflict;
-- make sure worker_create_or_replace correctly generates new names when types already exist
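-- A note on the \c changes above: psql's \connect takes positional arguments
-- [dbname [user [host [port]]]] and '-' keeps the current value, so
-- "\c - - :public_worker_1_host :worker_1_port" switches host and port while
-- reusing the current database and user.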

View File

@ -406,11 +406,11 @@ ROLLBACK;
-- There should be no constraint on master and worker(s)
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass;
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;
\c - - - :master_port
\c - - :master_host :master_port
-- Tests to check the effect of rollback
BEGIN;
@ -423,11 +423,11 @@ ROLLBACK;
-- There should be no constraint on master and worker(s)
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass;
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;
\c - - - :master_port
\c - - :master_host :master_port
DROP TABLE products;

View File

@ -2,14 +2,13 @@
-- MULTI_CREATE_TABLE
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100000;
-- Create new table definitions for use in testing distributed planning and
-- execution functionality. Also create indexes to boost performance. Since we
-- need to cover both reference join and partitioned join, we have created
-- reference and append distributed version of orders, customer and part tables.
SET citus.next_shard_id TO 360000;
CREATE TABLE lineitem (
l_orderkey bigint not null,
l_partkey integer not null,
@ -141,93 +140,7 @@ SELECT create_distributed_table('supplier_single_shard', 's_suppkey', 'append');
CREATE TABLE mx_table_test (col1 int, col2 text);
-- Since we're superuser, we can set the replication model to 'streaming' to
-- create a one-off MX table... but if we forget to set the replication factor to one,
-- we should see an error reminding us to fix that
SET citus.replication_model TO 'streaming';
SELECT create_distributed_table('mx_table_test', 'col1');
-- ok, so now actually create the one-off MX table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('mx_table_test', 'col1');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass;
DROP TABLE mx_table_test;
-- Show that master_create_distributed_table ignores citus.replication_model GUC
CREATE TABLE s_table(a int);
SELECT master_create_distributed_table('s_table', 'a', 'hash');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass;
-- Show that master_create_worker_shards complains when RF>1 and replication model is streaming
UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid='s_table'::regclass;
SELECT master_create_worker_shards('s_table', 4, 2);
DROP TABLE s_table;
RESET citus.replication_model;
-- Show that create_distributed_table with append and range distributions ignores
-- the citus.replication_model GUC
SET citus.shard_replication_factor TO 2;
SET citus.replication_model TO streaming;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
-- Show that master_create_distributed_table creates statement-replicated tables no matter
-- what citus.replication_model is set to
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
-- Check that the replication_model overwrite behavior is the same with RF=1
SET citus.shard_replication_factor TO 1;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
RESET citus.replication_model;
SET citus.next_shard_id TO 360009;
-- Test initial data loading
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
@ -288,348 +201,3 @@ WHERE col1 = 132;
DROP TABLE data_load_test1, data_load_test2;
END;
-- There should be no table on the worker node
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%';
\c - - - :master_port
-- creating an index after loading data works
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
CREATE INDEX data_load_test_idx ON data_load_test (col2);
DROP TABLE data_load_test;
END;
-- popping in and out of existence in the same transaction works
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
DROP TABLE data_load_test;
END;
-- but dropping after a write on the distributed table is currently disallowed
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
INSERT INTO data_load_test VALUES (243, 'world');
DROP TABLE data_load_test;
END;
-- Test data loading after dropping a column
CREATE TABLE data_load_test (col1 int, col2 text, col3 text, "CoL4"")" int);
INSERT INTO data_load_test VALUES (132, 'hello', 'world');
INSERT INTO data_load_test VALUES (243, 'world', 'hello');
ALTER TABLE data_load_test DROP COLUMN col1;
SELECT create_distributed_table('data_load_test', 'col3');
SELECT * FROM data_load_test ORDER BY col2;
-- make sure the tuple went to the right shard
SELECT * FROM data_load_test WHERE col3 = 'world';
DROP TABLE data_load_test;
SET citus.shard_replication_factor TO default;
SET citus.shard_count to 4;
CREATE TABLE lineitem_hash_part (like lineitem);
SELECT create_distributed_table('lineitem_hash_part', 'l_orderkey');
CREATE TABLE orders_hash_part (like orders);
SELECT create_distributed_table('orders_hash_part', 'o_orderkey');
CREATE UNLOGGED TABLE unlogged_table
(
key text,
value text
);
SELECT create_distributed_table('unlogged_table', 'key');
SELECT * FROM master_get_table_ddl_events('unlogged_table');
\c - - - :worker_1_port
SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%';
\c - - - :master_port
-- Test rollback of create table
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
ROLLBACK;
-- Table should not exist on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
\c - - - :master_port
-- Insert 3 rows to make sure that copy after shard creation touches the same
-- worker node twice.
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
INSERT INTO rollback_table VALUES(1, 'Name_1');
INSERT INTO rollback_table VALUES(2, 'Name_2');
INSERT INTO rollback_table VALUES(3, 'Name_3');
SELECT create_distributed_table('rollback_table','id');
ROLLBACK;
-- Table should not exist on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
\c - - - :master_port
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
\copy rollback_table from stdin delimiter ','
1, 'name_1'
2, 'name_2'
3, 'name_3'
\.
CREATE INDEX rollback_index ON rollback_table(id);
COMMIT;
-- Check the table is created
SELECT count(*) FROM rollback_table;
DROP TABLE rollback_table;
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
\copy rollback_table from stdin delimiter ','
1, 'name_1'
2, 'name_2'
3, 'name_3'
\.
ROLLBACK;
-- Table should not exist on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
\c - - - :master_port
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
CREATE TABLE tt2(id int);
SELECT create_distributed_table('tt2','id');
INSERT INTO tt1 VALUES(1);
INSERT INTO tt2 SELECT * FROM tt1 WHERE id = 1;
COMMIT;
-- Table should exist on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360069'::regclass;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360073'::regclass;
\c - - - :master_port
DROP TABLE tt1;
DROP TABLE tt2;
-- It is known that creating a table with master_create_empty_shard is not
-- transactional, so the table remains on the worker node after the rollback
BEGIN;
CREATE TABLE append_tt1(id int);
SELECT create_distributed_table('append_tt1','id','append');
SELECT master_create_empty_shard('append_tt1');
ROLLBACK;
-- Table exists on the worker node.
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360077'::regclass;
\c - - - :master_port
-- There should be no table on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%');
\c - - - :master_port
-- Queries executed via the router executor are allowed in the same transaction
-- with create_distributed_table
BEGIN;
CREATE TABLE tt1(id int);
INSERT INTO tt1 VALUES(1);
SELECT create_distributed_table('tt1','id');
INSERT INTO tt1 VALUES(2);
SELECT * FROM tt1 WHERE id = 1;
COMMIT;
-- Placements should be created on the worker
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360078'::regclass;
\c - - - :master_port
DROP TABLE tt1;
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
DROP TABLE tt1;
COMMIT;
-- There should be no table on the worker node
\c - - - :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%');
\c - - - :master_port
-- Tests with create_distributed_table & DDL & DML commands
-- Test should pass since GetPlacementListConnection can provide connections
-- in this order of execution
CREATE TABLE sample_table(id int);
SELECT create_distributed_table('sample_table','id');
BEGIN;
CREATE TABLE stage_table (LIKE sample_table);
\COPY stage_table FROM stdin; -- Note that this operation is a local copy
1
2
3
4
\.
SELECT create_distributed_table('stage_table', 'id');
INSERT INTO sample_table SELECT * FROM stage_table;
DROP TABLE stage_table;
SELECT * FROM sample_table WHERE id = 3;
COMMIT;
-- Show that rows of sample_table are updated
SELECT count(*) FROM sample_table;
DROP table sample_table;
-- Test as create_distributed_table - copy - create_distributed_table - copy
-- This combination is used by tests written by some ORMs.
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
\COPY tt1 from stdin;
1
2
3
\.
CREATE TABLE tt2(like tt1);
SELECT create_distributed_table('tt2','id');
\COPY tt2 from stdin;
4
5
6
\.
INSERT INTO tt1 SELECT * FROM tt2;
SELECT * FROM tt1 WHERE id = 3;
SELECT * FROM tt2 WHERE id = 6;
END;
SELECT count(*) FROM tt1;
-- the goal of the following test is to make sure that
-- both create_reference_table and create_distributed_table
-- calls create the schemas without leading to any deadlocks
-- first create reference table, then hash distributed table
BEGIN;
CREATE SCHEMA sc;
CREATE TABLE sc.ref(a int);
insert into sc.ref SELECT s FROM generate_series(0, 100) s;
SELECT create_reference_table('sc.ref');
CREATE TABLE sc.hash(a int);
insert into sc.hash SELECT s FROM generate_series(0, 100) s;
SELECT create_distributed_table('sc.hash', 'a');
COMMIT;
-- first create hash distributed table, then reference table
BEGIN;
CREATE SCHEMA sc2;
CREATE TABLE sc2.hash(a int);
insert into sc2.hash SELECT s FROM generate_series(0, 100) s;
SELECT create_distributed_table('sc2.hash', 'a');
CREATE TABLE sc2.ref(a int);
insert into sc2.ref SELECT s FROM generate_series(0, 100) s;
SELECT create_reference_table('sc2.ref');
COMMIT;
SET citus.shard_count TO 4;
BEGIN;
CREATE SCHEMA sc3;
CREATE TABLE sc3.alter_replica_table
(
name text,
id int PRIMARY KEY
);
ALTER TABLE sc3.alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
SELECT create_distributed_table('sc3.alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc3' LIMIT 1$$);
BEGIN;
CREATE SCHEMA sc4;
CREATE TABLE sc4.alter_replica_table
(
name text,
id int PRIMARY KEY
);
INSERT INTO sc4.alter_replica_table(id) SELECT generate_series(1,100);
SET search_path = 'sc4';
ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
SELECT create_distributed_table('alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc4' LIMIT 1$$);
SET search_path = 'public';
BEGIN;
CREATE SCHEMA sc5;
CREATE TABLE sc5.alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO sc5.alter_replica_table(id) SELECT generate_series(1,100);
ALTER TABLE sc5.alter_replica_table REPLICA IDENTITY FULL;
SELECT create_distributed_table('sc5.alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc5' LIMIT 1$$);
BEGIN;
CREATE SCHEMA sc6;
CREATE TABLE sc6.alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO sc6.alter_replica_table(id) SELECT generate_series(1,100);
CREATE UNIQUE INDEX unique_idx ON sc6.alter_replica_table(id);
ALTER TABLE sc6.alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
SELECT create_distributed_table('sc6.alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc6' LIMIT 1$$);
BEGIN;
CREATE TABLE alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO alter_replica_table(id) SELECT generate_series(1,100);
CREATE UNIQUE INDEX unique_idx ON alter_replica_table(id);
ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
SELECT create_distributed_table('alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='public' LIMIT 1$$);
DROP TABLE tt1;
DROP TABLE tt2;
DROP TABLE alter_replica_table;
DROP SCHEMA sc CASCADE;
DROP SCHEMA sc2 CASCADE;
DROP SCHEMA sc3 CASCADE;
DROP SCHEMA sc4 CASCADE;
DROP SCHEMA sc5 CASCADE;
DROP SCHEMA sc6 CASCADE;

View File

@ -224,11 +224,11 @@ CREATE TABLE check_example
other_other_col integer CHECK (abs(other_other_col) >= 100)
);
SELECT create_distributed_table('check_example', 'partition_col', 'hash');
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'check_example_partition_col_key_365056'::regclass;
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365056'::regclass;
\c - - - :master_port
\c - - :master_host :master_port
-- Index-based constraints are created with shard-extended names, but others
-- (e.g. expression-based table CHECK constraints) do _not_ have shardids in

View File

@ -0,0 +1,438 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360005;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100000;
-- Since we're superuser, we can set the replication model to 'streaming' to
-- create a one-off MX table... but if we forget to set the replication factor to one,
-- we should see an error reminding us to fix that
SET citus.replication_model TO 'streaming';
SELECT create_distributed_table('mx_table_test', 'col1');
-- ok, so now actually create the one-off MX table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('mx_table_test', 'col1');
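-- (pg_dist_partition.repmodel: 'c' = statement-based replication,
--  's' = streaming; a successfully created MX table shows 's')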
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass;
DROP TABLE mx_table_test;
-- Show that master_create_distributed_table ignores citus.replication_model GUC
CREATE TABLE s_table(a int);
SELECT master_create_distributed_table('s_table', 'a', 'hash');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass;
-- Show that master_create_worker_shards complains when RF>1 and replication model is streaming
UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid='s_table'::regclass;
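-- (master_create_worker_shards arguments: relation name, shard count,
--  replication factor; a replication factor of 2 conflicts with the
--  streaming repmodel forced above)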
SELECT master_create_worker_shards('s_table', 4, 2);
DROP TABLE s_table;
RESET citus.replication_model;
-- Show that create_distributed_table with append and range distributions ignores
-- the citus.replication_model GUC
SET citus.shard_replication_factor TO 2;
SET citus.replication_model TO streaming;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
-- Show that master_create_distributed_table creates statement-replicated tables no matter
-- what citus.replication_model is set to
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
-- Check that the replication_model overwrite behavior is the same with RF=1
SET citus.shard_replication_factor TO 1;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
RESET citus.replication_model;
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360025;
-- There should be no table on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%';
\c - - :master_host :master_port
-- creating an index after loading data works
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
CREATE INDEX data_load_test_idx ON data_load_test (col2);
DROP TABLE data_load_test;
END;
-- popping in and out of existence in the same transaction works
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
DROP TABLE data_load_test;
END;
-- but dropping after a write on the distributed table is currently disallowed
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);
INSERT INTO data_load_test VALUES (132, 'hello');
SELECT create_distributed_table('data_load_test', 'col1');
INSERT INTO data_load_test VALUES (243, 'world');
DROP TABLE data_load_test;
END;
-- Test data loading after dropping a column
CREATE TABLE data_load_test (col1 int, col2 text, col3 text, "CoL4"")" int);
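-- (the quoted name "CoL4"")" declares a column literally named CoL4"), because
--  a doubled "" inside a quoted identifier escapes a single double-quote)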
INSERT INTO data_load_test VALUES (132, 'hello', 'world');
INSERT INTO data_load_test VALUES (243, 'world', 'hello');
ALTER TABLE data_load_test DROP COLUMN col1;
SELECT create_distributed_table('data_load_test', 'col3');
SELECT * FROM data_load_test ORDER BY col2;
-- make sure the tuple went to the right shard
SELECT * FROM data_load_test WHERE col3 = 'world';
DROP TABLE data_load_test;
SET citus.shard_replication_factor TO default;
SET citus.shard_count to 4;
CREATE TABLE lineitem_hash_part (like lineitem);
SELECT create_distributed_table('lineitem_hash_part', 'l_orderkey');
CREATE TABLE orders_hash_part (like orders);
SELECT create_distributed_table('orders_hash_part', 'o_orderkey');
CREATE UNLOGGED TABLE unlogged_table
(
key text,
value text
);
SELECT create_distributed_table('unlogged_table', 'key');
SELECT * FROM master_get_table_ddl_events('unlogged_table');
\c - - :public_worker_1_host :worker_1_port
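-- (pg_class.relpersistence: 'p' = permanent, 'u' = unlogged, 't' = temporary;
--  the shards should inherit 'u' from the parent's UNLOGGED definition)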
SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%';
\c - - :master_host :master_port
-- Test rollback of create table
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
ROLLBACK;
-- Table should not exist on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
\c - - :master_host :master_port
-- Insert 3 rows to make sure that copy after shard creation touches the same
-- worker node twice.
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
INSERT INTO rollback_table VALUES(1, 'Name_1');
INSERT INTO rollback_table VALUES(2, 'Name_2');
INSERT INTO rollback_table VALUES(3, 'Name_3');
SELECT create_distributed_table('rollback_table','id');
ROLLBACK;
-- Table should not exist on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
\c - - :master_host :master_port
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
\copy rollback_table from stdin delimiter ','
1, 'name_1'
2, 'name_2'
3, 'name_3'
\.
CREATE INDEX rollback_index ON rollback_table(id);
COMMIT;
-- Check the table is created
SELECT count(*) FROM rollback_table;
DROP TABLE rollback_table;
BEGIN;
CREATE TABLE rollback_table(id int, name varchar(20));
SELECT create_distributed_table('rollback_table','id');
\copy rollback_table from stdin delimiter ','
1, 'name_1'
2, 'name_2'
3, 'name_3'
\.
ROLLBACK;
-- Table should not exist on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
\c - - :master_host :master_port
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
CREATE TABLE tt2(id int);
SELECT create_distributed_table('tt2','id');
INSERT INTO tt1 VALUES(1);
INSERT INTO tt2 SELECT * FROM tt1 WHERE id = 1;
COMMIT;
-- Table should exist on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360069'::regclass;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360073'::regclass;
\c - - :master_host :master_port
DROP TABLE tt1;
DROP TABLE tt2;
-- It is known that creating a table with master_create_empty_shard is not
-- transactional, so the table remains on the worker node after the rollback
BEGIN;
CREATE TABLE append_tt1(id int);
SELECT create_distributed_table('append_tt1','id','append');
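-- (the third argument is the distribution type; it defaults to 'hash')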
SELECT master_create_empty_shard('append_tt1');
ROLLBACK;
-- Table exists on the worker node.
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360077'::regclass;
\c - - :master_host :master_port
-- There should be no table on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%');
\c - - :master_host :master_port
-- Queries executing with router executor is allowed in the same transaction
-- with create_distributed_table
BEGIN;
CREATE TABLE tt1(id int);
INSERT INTO tt1 VALUES(1);
SELECT create_distributed_table('tt1','id');
INSERT INTO tt1 VALUES(2);
SELECT * FROM tt1 WHERE id = 1;
COMMIT;
-- Placements should be created on the worker
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360078'::regclass;
\c - - :master_host :master_port
DROP TABLE tt1;
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
DROP TABLE tt1;
COMMIT;
-- There should be no table on the worker node
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%');
\c - - :master_host :master_port
-- Tests with create_distributed_table & DDL & DML commands
-- Test should pass since GetPlacementListConnection can provide connections
-- in this order of execution
CREATE TABLE sample_table(id int);
SELECT create_distributed_table('sample_table','id');
BEGIN;
CREATE TABLE stage_table (LIKE sample_table);
\COPY stage_table FROM stdin; -- Note that this operation is a local copy
1
2
3
4
\.
SELECT create_distributed_table('stage_table', 'id');
INSERT INTO sample_table SELECT * FROM stage_table;
DROP TABLE stage_table;
SELECT * FROM sample_table WHERE id = 3;
COMMIT;
-- Show that the four rows from stage_table were inserted into sample_table
SELECT count(*) FROM sample_table;
DROP TABLE sample_table;
-- Test the create_distributed_table - copy - create_distributed_table - copy sequence
-- This combination is used by tests written by some ORMs.
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
\COPY tt1 from stdin;
1
2
3
\.
CREATE TABLE tt2(like tt1);
SELECT create_distributed_table('tt2','id');
\COPY tt2 from stdin;
4
5
6
\.
INSERT INTO tt1 SELECT * FROM tt2;
SELECT * FROM tt1 WHERE id = 3;
SELECT * FROM tt2 WHERE id = 6;
END;
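-- tt1 should now hold 6 rows: the 3 copied values plus the 3 inserted from tt2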
SELECT count(*) FROM tt1;
-- The goal of the following test is to make sure that
-- both create_reference_table and create_distributed_table
-- calls create the schemas without leading to any deadlocks
-- first create reference table, then hash distributed table
BEGIN;
CREATE SCHEMA sc;
CREATE TABLE sc.ref(a int);
insert into sc.ref SELECT s FROM generate_series(0, 100) s;
SELECT create_reference_table('sc.ref');
CREATE TABLE sc.hash(a int);
insert into sc.hash SELECT s FROM generate_series(0, 100) s;
SELECT create_distributed_table('sc.hash', 'a');
COMMIT;
-- first create hash distributed table, then reference table
BEGIN;
CREATE SCHEMA sc2;
CREATE TABLE sc2.hash(a int);
insert into sc2.hash SELECT s FROM generate_series(0, 100) s;
SELECT create_distributed_table('sc2.hash', 'a');
CREATE TABLE sc2.ref(a int);
insert into sc2.ref SELECT s FROM generate_series(0, 100) s;
SELECT create_reference_table('sc2.ref');
COMMIT;
SET citus.shard_count TO 4;
BEGIN;
CREATE SCHEMA sc3;
CREATE TABLE sc3.alter_replica_table
(
name text,
id int PRIMARY KEY
);
ALTER TABLE sc3.alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
SELECT create_distributed_table('sc3.alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc3' LIMIT 1$$);
BEGIN;
CREATE SCHEMA sc4;
CREATE TABLE sc4.alter_replica_table
(
name text,
id int PRIMARY KEY
);
INSERT INTO sc4.alter_replica_table(id) SELECT generate_series(1,100);
SET search_path = 'sc4';
ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
SELECT create_distributed_table('alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc4' LIMIT 1$$);
SET search_path = 'public';
BEGIN;
CREATE SCHEMA sc5;
CREATE TABLE sc5.alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO sc5.alter_replica_table(id) SELECT generate_series(1,100);
ALTER TABLE sc5.alter_replica_table REPLICA IDENTITY FULL;
SELECT create_distributed_table('sc5.alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc5' LIMIT 1$$);
BEGIN;
CREATE SCHEMA sc6;
CREATE TABLE sc6.alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO sc6.alter_replica_table(id) SELECT generate_series(1,100);
CREATE UNIQUE INDEX unique_idx ON sc6.alter_replica_table(id);
ALTER TABLE sc6.alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
SELECT create_distributed_table('sc6.alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc6' LIMIT 1$$);
BEGIN;
CREATE TABLE alter_replica_table
(
name text,
id int NOT NULL
);
INSERT INTO alter_replica_table(id) SELECT generate_series(1,100);
CREATE UNIQUE INDEX unique_idx ON alter_replica_table(id);
ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
SELECT create_distributed_table('alter_replica_table', 'id');
COMMIT;
SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='public' LIMIT 1$$);
DROP TABLE tt1;
DROP TABLE tt2;
DROP TABLE alter_replica_table;
DROP SCHEMA sc CASCADE;
DROP SCHEMA sc2 CASCADE;
DROP SCHEMA sc3 CASCADE;
DROP SCHEMA sc4 CASCADE;
DROP SCHEMA sc5 CASCADE;
DROP SCHEMA sc6 CASCADE;

View File

@ -58,7 +58,7 @@ BEGIN;
SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id();
\c - - - :master_port
\c - - :master_host :master_port
SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id();

View File

@ -105,12 +105,12 @@ SELECT create_distributed_table('foreign_table', 'id');
ALTER FOREIGN TABLE foreign_table rename to renamed_foreign_table;
ALTER FOREIGN TABLE renamed_foreign_table rename full_name to rename_name;
ALTER FOREIGN TABLE renamed_foreign_table alter rename_name type char(8);
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
select table_name, column_name, data_type
from information_schema.columns
where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id'
order by table_name;
\c - - - :master_port
\c - - :master_host :master_port
SELECT master_get_table_ddl_events('renamed_foreign_table');
@ -122,12 +122,12 @@ SELECT master_get_table_ddl_events('local_view');
-- clean up
DROP VIEW IF EXISTS local_view;
DROP FOREIGN TABLE IF EXISTS renamed_foreign_table;
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
select table_name, column_name, data_type
from information_schema.columns
where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id'
order by table_name;
\c - - - :master_port
\c - - :master_host :master_port
DROP TABLE IF EXISTS simple_table, not_null_table, column_constraint_table,
table_constraint_table, default_value_table, pkey_table,
unique_table, clustered_table, fiddly_table;

View File

@ -14,9 +14,9 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2');
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\dt too_long_*
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
@ -52,18 +52,18 @@ ALTER TABLE name_lengths ADD UNIQUE (float_col_123456789012345678901234567890123
ALTER TABLE name_lengths ADD EXCLUDE (int_col_12345678901234567890123456789012345678901234567890 WITH =);
ALTER TABLE name_lengths ADD CHECK (date_col_12345678901234567890123456789012345678901234567890 > '2014-01-01'::date);
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC;
\c - - - :master_port
\c - - :master_host :master_port
-- Placeholders for unsupported add constraints with EXPLICIT names that are too long
ALTER TABLE name_lengths ADD CONSTRAINT nl_unique_12345678901234567890123456789012345678901234567890 UNIQUE (float_col_12345678901234567890123456789012345678901234567890);
ALTER TABLE name_lengths ADD CONSTRAINT nl_exclude_12345678901234567890123456789012345678901234567890 EXCLUDE (int_col_12345678901234567890123456789012345678901234567890 WITH =);
ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_12345678901234567890123456789012345678901234567890 CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '2014-01-01'::date);
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC;
\c - - - :master_port
\c - - :master_host :master_port
-- Placeholders for RENAME operations
\set VERBOSITY TERSE
@ -76,19 +76,19 @@ ALTER TABLE name_lengths RENAME CONSTRAINT unique_123456789012345678901234567890
CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_lengths(col2);
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
\c - - - :master_port
\c - - :master_host :master_port
-- Verify that a new index name > 63 characters is auto-truncated
-- by the parser/rewriter before further processing, just as in Postgres.
CREATE INDEX tmp_idx_123456789012345678901234567890123456789012345678901234567890 ON name_lengths(col2);
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
@ -116,10 +116,10 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_n
SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash');
SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\di public.sneaky*225006
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass ORDER BY 1 DESC, 2 DESC;
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
@ -135,9 +135,9 @@ CREATE TABLE sneaky_name_lengths (
);
SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\di unique*225008
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
@ -151,9 +151,9 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col2 integer not null);
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\dt *225000000000*
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;
@ -171,10 +171,10 @@ SELECT shard_name(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B
FROM pg_dist_shard
WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass;
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
\dt public.elephant_*
\di public.elephant_*
\c - - - :master_port
\c - - :master_host :master_port
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;

View File

@ -65,7 +65,7 @@ CREATE TABLE repartition_udt_other (
-- proceed with type creation as above; thus the OIDs will be different.
-- so that the OID is off.
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
-- START type creation
-- ... as well as a function to use as its comparator...
@ -109,7 +109,7 @@ FUNCTION 1 test_udt_hash(test_udt);
-- END type creation
\c - - - :worker_2_port
\c - - :public_worker_2_host :worker_2_port
-- START type creation
-- ... as well as a function to use as its comparator...
@ -155,7 +155,7 @@ FUNCTION 1 test_udt_hash(test_udt);
-- Connect to master
\c - - - :master_port
\c - - :master_host :master_port
-- Distribute and populate the two tables.
SET citus.shard_count TO 3;

View File

@ -7,7 +7,7 @@ SET citus.next_shard_id TO 830000;
-- Create UDF in master and workers
\c - - - :master_port
\c - - :master_host :master_port
DROP FUNCTION IF EXISTS median(double precision[]);
CREATE FUNCTION median(double precision[]) RETURNS double precision
@ -18,7 +18,7 @@ LANGUAGE sql IMMUTABLE AS $_$
OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
$_$;
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
DROP FUNCTION IF EXISTS median(double precision[]);
CREATE FUNCTION median(double precision[]) RETURNS double precision
@ -29,7 +29,7 @@ LANGUAGE sql IMMUTABLE AS $_$
OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
$_$;
\c - - - :worker_2_port
\c - - :public_worker_2_host :worker_2_port
DROP FUNCTION IF EXISTS median(double precision[]);
CREATE FUNCTION median(double precision[]) RETURNS double precision
@ -41,7 +41,7 @@ LANGUAGE sql IMMUTABLE AS $_$
$_$;
-- Run query on master
\c - - - :master_port
\c - - :master_host :master_port
SET citus.task_executor_type TO 'task-tracker';

View File

@ -88,43 +88,6 @@ BEGIN
END;
$func$;
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';
-- set sync intervals to less than 15s so wait_until_metadata_sync never times out
ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
SELECT pg_reload_conf();
-- Verifies that pg_dist_node and pg_dist_placement in the given worker match the ones in the coordinator
CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
RETURNS BOOLEAN
LANGUAGE sql
AS $$
SELECT wait_until_metadata_sync();
WITH dist_node_summary AS (
SELECT 'SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport, isactive) ORDER BY nodeid) FROM pg_dist_node' as query
), dist_node_check AS (
SELECT count(distinct result) = 1 AS matches
FROM dist_node_summary CROSS JOIN LATERAL
master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
ARRAY[dist_node_summary.query, dist_node_summary.query],
false)
), dist_placement_summary AS (
SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement' AS query
), dist_placement_check AS (
SELECT count(distinct result) = 1 AS matches
FROM dist_placement_summary CROSS JOIN LATERAL
master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
ARRAY[dist_placement_summary.query, dist_placement_summary.query],
false)
)
SELECT dist_node_check.matches AND dist_placement_check.matches
FROM dist_node_check CROSS JOIN dist_placement_check
$$;
--
-- Procedure for creating shards for range partitioned distributed table.
--
@ -141,17 +104,3 @@ BEGIN
END LOOP;
END;
$$ LANGUAGE plpgsql;
-- partition_task_list_results tests the internal PartitionTasklistResults function
CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix text,
query text,
target_table regclass,
binaryFormat bool DEFAULT true)
RETURNS TABLE(resultId text,
nodeId int,
rowCount bigint,
targetShardId bigint,
targetShardIndex int)
LANGUAGE C STRICT VOLATILE
AS 'citus', $$partition_task_list_results$$;

View File

@ -0,0 +1,50 @@
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';
-- set sync intervals to less than 15s so wait_until_metadata_sync never times out
ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
SELECT pg_reload_conf();
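-- (pg_reload_conf() signals the server to re-read its configuration, so the
--  ALTER SYSTEM settings above take effect without a restart)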
-- Verifies that pg_dist_node and pg_dist_placement in the given worker match the ones in the coordinator
CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
RETURNS BOOLEAN
LANGUAGE sql
AS $$
SELECT wait_until_metadata_sync();
WITH dist_node_summary AS (
SELECT 'SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport, isactive) ORDER BY nodeid) FROM pg_dist_node' as query
), dist_node_check AS (
SELECT count(distinct result) = 1 AS matches
FROM dist_node_summary CROSS JOIN LATERAL
master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
ARRAY[dist_node_summary.query, dist_node_summary.query],
false)
), dist_placement_summary AS (
SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement' AS query
), dist_placement_check AS (
SELECT count(distinct result) = 1 AS matches
FROM dist_placement_summary CROSS JOIN LATERAL
master_run_on_worker(ARRAY[hostname, 'localhost'], ARRAY[port, master_port],
ARRAY[dist_placement_summary.query, dist_placement_summary.query],
false)
)
SELECT dist_node_check.matches AND dist_placement_check.matches
FROM dist_node_check CROSS JOIN dist_placement_check
$$;
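-- example invocation (assuming the default regression test ports, coordinator
-- on 57636 and a worker on 57637): SELECT verify_metadata('localhost', 57637);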
-- partition_task_list_results tests the internal PartitionTasklistResults function
CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix text,
query text,
target_table regclass,
binaryFormat bool DEFAULT true)
RETURNS TABLE(resultId text,
nodeId int,
rowCount bigint,
targetShardId bigint,
targetShardIndex int)
LANGUAGE C STRICT VOLATILE
AS 'citus', $$partition_task_list_results$$;

View File

@ -131,7 +131,7 @@ SELECT master_create_distributed_table('second_dustbunnies', 'id', 'hash');
SELECT master_create_worker_shards('second_dustbunnies', 1, 2);
-- run VACUUM and ANALYZE against the table on the master
\c - - - :master_port
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
VACUUM dustbunnies;
@ -143,21 +143,21 @@ ANALYZE dustbunnies;
VACUUM (FULL) dustbunnies;
VACUUM ANALYZE dustbunnies;
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
-- disable auto-VACUUM for next test
ALTER TABLE dustbunnies_990002 SET (autovacuum_enabled = false);
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid='dustbunnies_990002'::regclass
\gset
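-- (\gset stores the frozenxid value selected above into the psql variable
--  :frozenxid, used in the comparison after VACUUM FREEZE below)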
-- send a VACUUM FREEZE after adding a new row
\c - - - :master_port
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
INSERT INTO dustbunnies VALUES (5, 'peter');
VACUUM (FREEZE) dustbunnies;
-- verify that relfrozenxid increased
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid='dustbunnies_990002'::regclass;
@ -166,18 +166,18 @@ SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
-- add NULL values, then perform column-specific ANALYZE
\c - - - :master_port
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
INSERT INTO dustbunnies VALUES (6, NULL, NULL);
ANALYZE dustbunnies (name);
-- verify that name's NULL ratio is updated but age's is not
\c - - - :worker_1_port
\c - - :public_worker_1_host :worker_1_port
SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
\c - - - :master_port
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
-- verify warning for unqualified VACUUM