--
-- MULTI_SCHEMA_SUPPORT
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1190000;
-- create schema to test schema support
CREATE SCHEMA test_schema_support;
-- test COPY with schema
-- create local table to append
CREATE TABLE public.nation_local(
    n_nationkey integer not null,
    n_name char(25) not null,
    n_regionkey integer not null,
    n_comment varchar(152)
);
\copy public.nation_local FROM STDIN with delimiter '|';
CREATE TABLE test_schema_support.nation_append(
    n_nationkey integer not null,
    n_name char(25) not null,
    n_regionkey integer not null,
    n_comment varchar(152)
);
SELECT create_distributed_table('test_schema_support.nation_append', 'n_nationkey', 'append');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT master_create_empty_shard('test_schema_support.nation_append') as simple_shardid \gset
-- append data to the shard
copy test_schema_support.nation_append FROM STDIN with (append_to_shard :simple_shardid, delimiter '|');
-- verify the data was actually appended to the shard
SELECT COUNT(*) FROM test_schema_support.nation_append;
 count
---------------------------------------------------------------------
     6
(1 row)

-- test with a shard name that contains special characters
CREATE TABLE test_schema_support."nation._'append" (
    n_nationkey integer not null,
    n_name char(25) not null,
    n_regionkey integer not null,
    n_comment varchar(152));
SELECT create_distributed_table('test_schema_support."nation._''append"', 'n_nationkey', 'append');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT master_create_empty_shard('test_schema_support."nation._''append"') as special_shardid \gset
copy test_schema_support."nation._'append" FROM STDIN with (append_to_shard :special_shardid, delimiter '|');
-- verify the data was actually appended to the shard
SELECT COUNT(*) FROM test_schema_support."nation._'append";
 count
---------------------------------------------------------------------
     6
(1 row)

-- test COPY with schema when search_path is set
SET search_path TO test_schema_support;
copy nation_append FROM STDIN with (append_to_shard :simple_shardid, delimiter '|');
-- verify the data was actually appended to the shard
SELECT COUNT(*) FROM nation_append;
 count
---------------------------------------------------------------------
    12
(1 row)

-- test with search_path set and a shard name that contains special characters
copy "nation._'append" FROM STDIN with (append_to_shard :special_shardid, delimiter '|');
-- verify the data was actually appended to the shard
SELECT COUNT(*) FROM "nation._'append";
 count
---------------------------------------------------------------------
    12
(1 row)

-- test shard creation on append (by data loading) and hash-distributed (with UDF) tables
-- when search_path is set
SET search_path TO test_schema_support;
-- create a shard with COPY on an append-distributed table
CREATE TABLE nation_append_search_path(
    n_nationkey integer not null,
    n_name char(25) not null,
    n_regionkey integer not null,
    n_comment varchar(152)
);
SELECT create_distributed_table('nation_append_search_path', 'n_nationkey', 'append');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT master_create_empty_shard('nation_append_search_path') AS shardid \gset
copy nation_append_search_path FROM STDIN with (delimiter '|', append_to_shard :shardid);
CREATE TABLE test_schema_support.nation_hash(
    n_nationkey integer not null,
    n_name char(25) not null,
    n_regionkey integer not null,
    n_comment
varchar(152) ); SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', shard_count:=4); create_distributed_table --------------------------------------------------------------------- (1 row) RESET citus.shard_replication_factor; -- test cursors SET search_path TO public; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM test_schema_support.nation_append WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) END; -- test with search_path is set SET search_path TO test_schema_support; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM nation_append WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) END; -- test inserting to table in different schema SET search_path TO public; INSERT INTO test_schema_support.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (6, 'FRANCE', 3); -- verify insertion SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey = 6; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 6 | FRANCE | 3 | (1 row) -- test with search_path is set SET search_path TO test_schema_support; INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (7, 'GERMANY', 3); -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 7; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 7 | GERMANY | 3 | (1 row) -- test UDFs with schemas SET search_path TO public; \copy test_schema_support.nation_hash FROM STDIN with delimiter '|'; -- create UDF in master node CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction --------------------------------------------------------------------- 1 10 11 2 4 5 7 8 (8 rows) -- UDF in public, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT public.dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction --------------------------------------------------------------------- 1 10 11 2 4 5 7 8 (8 rows) -- create UDF in master node in schema SET search_path TO test_schema_support; CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction2 --------------------------------------------------------------------- 1 10 11 2 4 5 7 8 (8 rows) -- UDF in schema, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT dummyFunction2(n_nationkey) FROM nation_hash GROUP BY 1 ORDER BY 1; dummyfunction2 --------------------------------------------------------------------- 1 10 11 2 4 5 7 8 (8 rows) -- test operators with schema SET search_path TO public; -- create operator in master CREATE OPERATOR test_schema_support.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); -- create operator in worker node 1 \c - - - :worker_1_port CREATE OPERATOR test_schema_support.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); -- create operator in worker node 2 \c - - - :worker_2_port CREATE OPERATOR test_schema_support.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); \c - - - :master_port -- test with search_path is not set SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey OPERATOR(test_schema_support.===) 1; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | 
ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) -- test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) -- test with multi-shard DML SET search_path TO public; UPDATE test_schema_support.nation_hash SET n_regionkey = n_regionkey + 1; --verify modification SELECT * FROM test_schema_support.nation_hash ORDER BY 1,2,3,4; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 0 | ALGERIA | 1 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 2 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 2 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 2 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 5 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 | ETHIOPIA | 1 | ven packages wake quickly. regu 6 | FRANCE | 4 | 7 | GERMANY | 4 | (8 rows) --test with search_path is set SET search_path TO test_schema_support; UPDATE nation_hash SET n_regionkey = n_regionkey + 1; --verify modification SELECT * FROM nation_hash ORDER BY 1,2,3,4; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 0 | ALGERIA | 2 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 3 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 3 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 3 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 6 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 | ETHIOPIA | 2 | ven packages wake quickly. 
regu 6 | FRANCE | 5 | 7 | GERMANY | 5 | (8 rows) --test COLLATION with schema SET search_path TO public; SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset \if :server_version_ge_17 -- PG17 renamed colliculocale to colllocale -- Relevant PG commit: -- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset \elif :server_version_ge_16 -- In PG16, read-only server settings lc_collate and lc_ctype are removed -- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset \else SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset \endif CREATE COLLATION test_schema_support.english (LOCALE = :current_locale); \c - - - :master_port SET citus.shard_replication_factor TO 2; CREATE TABLE test_schema_support.nation_hash_collation( n_nationkey integer not null, n_name char(25) not null COLLATE test_schema_support.english, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_get_table_ddl_events('test_schema_support.nation_hash_collation') ORDER BY 1; master_get_table_ddl_events --------------------------------------------------------------------- ALTER TABLE test_schema_support.nation_hash_collation OWNER TO postgres CREATE TABLE test_schema_support.nation_hash_collation (n_nationkey integer NOT NULL, n_name character(25) NOT NULL COLLATE test_schema_support.english, n_regionkey integer NOT NULL, n_comment character varying(152)) USING heap (2 rows) SELECT create_distributed_table('test_schema_support.nation_hash_collation', 'n_nationkey', 'hash', shard_count := 4, colocate_with := 'none'); create_distributed_table --------------------------------------------------------------------- (1 row) \copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_collation ORDER BY 1,2,3,4; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 | ETHIOPIA | 0 | ven packages wake quickly. regu (6 rows) SELECT n_comment FROM test_schema_support.nation_hash_collation ORDER BY n_comment COLLATE test_schema_support.english; n_comment --------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. 
final dugouts are quickly across the furiously regular d y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) --test with search_path is set SET search_path TO test_schema_support; CREATE TABLE nation_hash_collation_search_path( n_nationkey integer not null, n_name char(25) not null COLLATE english, n_regionkey integer not null, n_comment varchar(152) ); SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('nation_hash_collation_search_path', 'n_nationkey', 'hash', shard_count := 4, colocate_with := 'none'); create_distributed_table --------------------------------------------------------------------- (1 row) \copy nation_hash_collation_search_path FROM STDIN with delimiter '|'; SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai (6 rows) SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; n_comment --------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) --test composite types with schema SET search_path TO public; CREATE TYPE test_schema_support.new_composite_type as (key1 text, key2 text); CREATE TABLE test_schema_support.nation_hash_composite_types( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152), test_col test_schema_support.new_composite_type ); SELECT create_distributed_table('test_schema_support.nation_hash_composite_types', 'n_nationkey', 'hash', shard_count := 4, colocate_with := 'none'); create_distributed_table --------------------------------------------------------------------- (1 row) -- insert some data to verify composite type queries \copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = '(a,a)'::test_schema_support.new_composite_type; n_nationkey | n_name | n_regionkey | n_comment | test_col --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. 
carefully final deposits detect slyly agai | (a,a) (1 row) --test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type; n_nationkey | n_name | n_regionkey | n_comment | test_col --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) -- test ALTER TABLE ADD/DROP queries with schemas SET search_path TO public; ALTER TABLE test_schema_support.nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | new_col | integer | (5 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | new_col | integer | (5 rows) \c - - - :master_port ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS non_existent_column; NOTICE: column "non_existent_column" of relation "nation_hash" does not exist, skipping ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | (4 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | (4 rows) \c - - - :master_port --test with search_path is set SET search_path TO test_schema_support; ALTER TABLE nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | new_col | integer | (5 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | new_col | integer | (5 rows) \c - - - :master_port SET search_path TO test_schema_support; ALTER TABLE nation_hash DROP COLUMN IF EXISTS non_existent_column; NOTICE: column "non_existent_column" of relation 
"nation_hash" does not exist, skipping ALTER TABLE nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | (4 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | (4 rows) \c - - - :master_port -- test CREATE/DROP INDEX with schemas SET search_path TO public; -- CREATE index CREATE INDEX index1 ON test_schema_support.nation_hash(n_name); --verify INDEX is created SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1'::regclass; Column | Type | Definition --------------------------------------------------------------------- n_name | character(25) | n_name (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1_1190003'::regclass; Column | Type | Definition --------------------------------------------------------------------- n_name | character(25) | n_name (1 row) \c - - - :master_port -- DROP index DROP INDEX test_schema_support.index1; --verify INDEX is dropped \d test_schema_support.index1 \c - - - :worker_1_port \d test_schema_support.index1_1190003 \c - - - :master_port --test with search_path is set SET search_path TO test_schema_support; -- CREATE index CREATE INDEX index1 ON nation_hash(n_name); --verify INDEX is created SELECT "Column", "Type", "Definition" FROM public.index_attrs WHERE relid = 'test_schema_support.index1'::regclass; Column | Type | Definition --------------------------------------------------------------------- n_name | character(25) | n_name (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1_1190003'::regclass; Column | Type | Definition --------------------------------------------------------------------- n_name | character(25) | n_name (1 row) \c - - - :master_port -- DROP index SET search_path TO test_schema_support; DROP INDEX index1; --verify INDEX is dropped \d test_schema_support.index1 \c - - - :worker_1_port \d test_schema_support.index1_1190003 \c - - - :master_port -- test citus_copy_shard_placement with schemas SET search_path TO public; -- delete placements DELETE FROM pg_dist_shard_placement WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT citus_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port, transfer_mode := 'block_writes'); citus_copy_shard_placement --------------------------------------------------------------------- (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; shardstate | nodename | nodeport --------------------------------------------------------------------- 1 | localhost | 57637 1 | localhost | 57638 (2 rows) --test with search_path is set SET search_path TO test_schema_support; -- mark shard as inactive DELETE FROM 
pg_dist_shard_placement WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT citus_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port, transfer_mode := 'block_writes'); citus_copy_shard_placement --------------------------------------------------------------------- (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; shardstate | nodename | nodeport --------------------------------------------------------------------- 1 | localhost | 57637 1 | localhost | 57638 (2 rows) -- check joins of tables which are in schemas other than public -- we create new tables with replication factor of 1 -- so that we guarantee to have repartitions when necessary -- create necessary objects and load data to them CREATE SCHEMA test_schema_support_join_1; CREATE SCHEMA test_schema_support_join_2; SET citus.shard_count to 4; SET citus.shard_replication_factor to 1; CREATE TABLE test_schema_support_join_1.nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); CREATE TABLE test_schema_support_join_1.nation_hash_2 ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); CREATE TABLE test_schema_support_join_2.nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey'); create_distributed_table --------------------------------------------------------------------- (1 row) \copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; SELECT create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey'); create_distributed_table --------------------------------------------------------------------- (1 row) \copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey'); create_distributed_table --------------------------------------------------------------------- (1 row) \copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; SET citus.enable_repartition_joins to ON; -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; count --------------------------------------------------------------------- 6 (1 row) -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; count --------------------------------------------------------------------- 6 (1 row) -- check when search_path is public, -- join of two tables which are in same schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_1.nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; count --------------------------------------------------------------------- 6 (1 row) -- check when search_path is different than public, -- join of two tables which 
are in same schemas, -- join on partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; count --------------------------------------------------------------------- 6 (1 row) -- single repartition joins -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; count --------------------------------------------------------------------- 6 (1 row) -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; count --------------------------------------------------------------------- 6 (1 row) -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on partition column and non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_regionkey; count --------------------------------------------------------------------- 6 (1 row) -- hash repartition joins -- check when search_path is public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; count --------------------------------------------------------------------- 14 (1 row) -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; count --------------------------------------------------------------------- 14 (1 row) -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_regionkey = n2.n_regionkey; count --------------------------------------------------------------------- 14 (1 row) -- set task_executor back to adaptive -- test ALTER TABLE SET SCHEMA SET search_path TO public; CREATE SCHEMA old_schema; CREATE TABLE old_schema.table_set_schema(id int); SELECT create_distributed_table('old_schema.table_set_schema', 'id'); create_distributed_table --------------------------------------------------------------------- (1 row) CREATE SCHEMA new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM pg_catalog.pg_dist_object WHERE objid::oid::regnamespace IN ('old_schema', 'new_schema') ORDER BY objid::oid::regnamespace; Distributed Schemas --------------------------------------------------------------------- old_schema new_schema (2 rows) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema" FROM information_schema.tables WHERE table_name LIKE 'table\_set\_schema\_%' AND table_schema IN ('old_schema', 'new_schema', 'public') GROUP BY 
table_schema; Shards' Schema --------------------------------------------------------------------- old_schema (1 row) \c - - - :master_port ALTER TABLE old_schema.table_set_schema SET SCHEMA new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM pg_catalog.pg_dist_object WHERE objid::oid::regnamespace IN ('old_schema', 'new_schema') ORDER BY objid::oid::regnamespace; Distributed Schemas --------------------------------------------------------------------- old_schema new_schema (2 rows) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema" FROM information_schema.tables WHERE table_name LIKE 'table\_set\_schema\_%' AND table_schema IN ('old_schema', 'new_schema', 'public') GROUP BY table_schema; Shards' Schema --------------------------------------------------------------------- new_schema (1 row) \c - - - :master_port SELECT * FROM new_schema.table_set_schema; id --------------------------------------------------------------------- (0 rows) DROP SCHEMA old_schema CASCADE; DROP SCHEMA new_schema CASCADE; NOTICE: drop cascades to table new_schema.table_set_schema -- test ALTER TABLE SET SCHEMA from public CREATE TABLE table_set_schema(id int); SELECT create_distributed_table('table_set_schema', 'id'); create_distributed_table --------------------------------------------------------------------- (1 row) CREATE SCHEMA new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM pg_catalog.pg_dist_object WHERE objid='new_schema'::regnamespace::oid; Distributed Schemas --------------------------------------------------------------------- new_schema (1 row) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema" FROM information_schema.tables WHERE table_name LIKE 'table\_set\_schema\_%' AND table_schema IN ('new_schema', 'public') GROUP BY table_schema; Shards' Schema --------------------------------------------------------------------- public (1 row) \c - - - :master_port ALTER TABLE table_set_schema SET SCHEMA new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM pg_catalog.pg_dist_object WHERE objid='new_schema'::regnamespace::oid; Distributed Schemas --------------------------------------------------------------------- new_schema (1 row) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema" FROM information_schema.tables WHERE table_name LIKE 'table\_set\_schema\_%' AND table_schema IN ('new_schema', 'public') GROUP BY table_schema; Shards' Schema --------------------------------------------------------------------- new_schema (1 row) \c - - - :master_port SELECT * FROM new_schema.table_set_schema; id --------------------------------------------------------------------- (0 rows) DROP SCHEMA new_schema CASCADE; NOTICE: drop cascades to table new_schema.table_set_schema SET citus.next_shard_id TO 1195000; -- test ALTER TABLE SET SCHEMA when a search path is set CREATE SCHEMA old_schema; CREATE TABLE old_schema.table_set_schema(id int); SELECT create_distributed_table('old_schema.table_set_schema', 'id'); create_distributed_table --------------------------------------------------------------------- (1 row) CREATE TABLE table_set_schema(id int); SELECT create_distributed_table('table_set_schema', 'id'); create_distributed_table --------------------------------------------------------------------- (1 row) CREATE SCHEMA new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM pg_catalog.pg_dist_object WHERE objid::oid::regnamespace IN ('old_schema', 'new_schema') ORDER BY objid::oid::regnamespace; 
Distributed Schemas --------------------------------------------------------------------- old_schema new_schema (2 rows) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema", COUNT(*) AS "Counts" FROM information_schema.tables WHERE table_name LIKE 'table\_set\_schema\_%' AND table_schema IN ('old_schema', 'new_schema', 'public') GROUP BY table_schema; Shards' Schema | Counts --------------------------------------------------------------------- old_schema | 4 public | 4 (2 rows) \c - - - :master_port SET search_path TO old_schema; ALTER TABLE table_set_schema SET SCHEMA new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM pg_catalog.pg_dist_object WHERE objid::oid::regnamespace IN ('old_schema', 'new_schema') ORDER BY objid::oid::regnamespace; Distributed Schemas --------------------------------------------------------------------- old_schema new_schema (2 rows) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema", COUNT(*) AS "Counts" FROM information_schema.tables WHERE table_name LIKE 'table\_set\_schema\_%' AND table_schema IN ('old_schema', 'new_schema', 'public') GROUP BY table_schema; Shards' Schema | Counts --------------------------------------------------------------------- new_schema | 4 public | 4 (2 rows) \c - - - :master_port SELECT * FROM new_schema.table_set_schema; id --------------------------------------------------------------------- (0 rows) SET citus.next_shard_id TO 1196000; SET search_path to public; DROP SCHEMA old_schema CASCADE; DROP SCHEMA new_schema CASCADE; NOTICE: drop cascades to table new_schema.table_set_schema DROP TABLE table_set_schema; -- test ALTER TABLE SET SCHEMA with nonexisting schemas and table -- expect all to give error CREATE SCHEMA existing_schema; CREATE SCHEMA another_existing_schema; CREATE TABLE existing_schema.table_set_schema(id int); SELECT create_distributed_table('existing_schema.table_set_schema', 'id'); create_distributed_table --------------------------------------------------------------------- (1 row) ALTER TABLE non_existent_schema.table_set_schema SET SCHEMA another_existing_schema; ERROR: schema "non_existent_schema" does not exist ALTER TABLE non_existent_schema.non_existent_table SET SCHEMA another_existing_schema; ERROR: schema "non_existent_schema" does not exist ALTER TABLE non_existent_schema.table_set_schema SET SCHEMA another_non_existent_schema; ERROR: schema "non_existent_schema" does not exist ALTER TABLE non_existent_schema.non_existent_table SET SCHEMA another_non_existent_schema; ERROR: schema "non_existent_schema" does not exist ALTER TABLE existing_schema.non_existent_table SET SCHEMA another_existing_schema; ERROR: relation "existing_schema.non_existent_table" does not exist ALTER TABLE existing_schema.non_existent_table SET SCHEMA non_existent_schema; ERROR: relation "existing_schema.non_existent_table" does not exist ALTER TABLE existing_schema.table_set_schema SET SCHEMA non_existent_schema; ERROR: schema "non_existent_schema" does not exist -- test ALTER TABLE IF EXISTS SET SCHEMA with nonexisting schemas and table ALTER TABLE IF EXISTS non_existent_schema.table_set_schema SET SCHEMA another_existing_schema; NOTICE: relation "table_set_schema" does not exist, skipping ALTER TABLE IF EXISTS non_existent_schema.non_existent_table SET SCHEMA another_existing_schema; NOTICE: relation "non_existent_table" does not exist, skipping ALTER TABLE IF EXISTS non_existent_schema.table_set_schema SET SCHEMA another_non_existent_schema; NOTICE: relation "table_set_schema" does 
not exist, skipping ALTER TABLE IF EXISTS non_existent_schema.non_existent_table SET SCHEMA another_non_existent_schema; NOTICE: relation "non_existent_table" does not exist, skipping ALTER TABLE IF EXISTS existing_schema.non_existent_table SET SCHEMA another_existing_schema; NOTICE: relation "non_existent_table" does not exist, skipping ALTER TABLE IF EXISTS existing_schema.non_existent_table SET SCHEMA non_existent_schema; NOTICE: relation "non_existent_table" does not exist, skipping ALTER TABLE IF EXISTS existing_schema.table_set_schema SET SCHEMA non_existent_schema; ERROR: schema "non_existent_schema" does not exist ALTER TABLE IF EXISTS non_existent_table SET SCHEMA another_existing_schema; NOTICE: relation "non_existent_table" does not exist, skipping ALTER TABLE IF EXISTS non_existent_table SET SCHEMA non_existent_schema; NOTICE: relation "non_existent_table" does not exist, skipping DROP SCHEMA existing_schema, another_existing_schema CASCADE; NOTICE: drop cascades to table existing_schema.table_set_schema -- test DROP SCHEMA with nonexisting schemas DROP SCHEMA ax, bx, cx, dx, ex, fx, gx, jx; ERROR: schema "ax" does not exist -- test ALTER TABLE SET SCHEMA with interesting names CREATE SCHEMA "cItuS.T E E N'sSchema"; CREATE SCHEMA "citus-teen's scnd schm."; CREATE TABLE "cItuS.T E E N'sSchema"."be$t''t*ble" (id int); SELECT create_distributed_table('"cItuS.T E E N''sSchema"."be$t''''t*ble"', 'id'); create_distributed_table --------------------------------------------------------------------- (1 row) ALTER TABLE "cItuS.T E E N'sSchema"."be$t''t*ble" SET SCHEMA "citus-teen's scnd schm."; \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema" FROM information_schema.tables WHERE table_name LIKE 'be$t''''t*ble%' GROUP BY table_schema; Shards' Schema --------------------------------------------------------------------- citus-teen's scnd schm. (1 row) \c - - - :master_port SELECT * FROM "citus-teen's scnd schm."."be$t''t*ble"; id --------------------------------------------------------------------- (0 rows) DROP SCHEMA "cItuS.T E E N'sSchema", "citus-teen's scnd schm." 
CASCADE; NOTICE: drop cascades to table "citus-teen's scnd schm."."be$t''t*ble" -- test schema propagation with user other than current user CREATE USER "test-user"; SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE postgres to "test-user"'); run_command_on_coordinator_and_workers --------------------------------------------------------------------- (1 row) CREATE SCHEMA schema_with_user AUTHORIZATION "test-user"; CREATE TABLE schema_with_user.test_table(column1 int); SELECT create_reference_table('schema_with_user.test_table'); create_reference_table --------------------------------------------------------------------- (1 row) -- verify that owner of the created schema is test-user \c - - - :worker_1_port \dn schema_with_user List of schemas Name | Owner --------------------------------------------------------------------- schema_with_user | test-user (1 row) \c - - - :master_port SET citus.next_shard_id TO 1197000; -- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock DROP OWNED BY "test-user" CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table schema_with_user.test_table drop cascades to table schema_with_user.test_table_1190039 NOTICE: schema "schema_with_user" does not exist, skipping CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)" PL/pgSQL function citus_drop_trigger() line XX at PERFORM DROP USER "test-user"; -- test run_command_on_* UDFs with schema CREATE SCHEMA run_test_schema; CREATE TABLE run_test_schema.test_table(id int); SELECT create_distributed_table('run_test_schema.test_table','id'); create_distributed_table --------------------------------------------------------------------- (1 row) -- randomly insert data to evaluate below UDFs better INSERT INTO run_test_schema.test_table VALUES(1); INSERT INTO run_test_schema.test_table VALUES(7); INSERT INTO run_test_schema.test_table VALUES(9); -- try UDFs which call shard_name as a subroutine SELECT sum(result::int) FROM run_command_on_placements('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); sum --------------------------------------------------------------------- 49152 (1 row) SELECT sum(result::int) FROM run_command_on_shards('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); sum --------------------------------------------------------------------- 24576 (1 row) -- test capital letters on both table and schema names -- create schema with weird names CREATE SCHEMA "CiTuS.TeeN"; CREATE SCHEMA "CiTUS.TEEN2"; -- create table with weird names CREATE TABLE "CiTuS.TeeN"."TeeNTabLE.1!?!"(id int, "TeNANt_Id" int); CREATE TABLE "CiTUS.TEEN2"."CAPITAL_TABLE"(i int, j int); -- create distributed table with weird names SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id'); create_distributed_table --------------------------------------------------------------------- (1 row) SELECT create_distributed_table('"CiTUS.TEEN2"."CAPITAL_TABLE"', 'i'); create_distributed_table --------------------------------------------------------------------- (1 row) -- truncate tables with weird names INSERT INTO "CiTuS.TeeN"."TeeNTabLE.1!?!" 
VALUES(1, 1); INSERT INTO "CiTUS.TEEN2"."CAPITAL_TABLE" VALUES(0, 1); TRUNCATE "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE"; SELECT count(*) FROM "CiTUS.TEEN2"."CAPITAL_TABLE"; count --------------------------------------------------------------------- 0 (1 row) -- insert into table with weird names INSERT INTO "CiTuS.TeeN"."TeeNTabLE.1!?!" VALUES(1, 1),(1, 0),(0, 1),(2, 3),(3, 2),(4, 4); INSERT INTO "CiTUS.TEEN2"."CAPITAL_TABLE" VALUES(0, 1),(1, 0),(2, 1),(4, 3),(3, 2),(4, 4); -- join on tables with weird names SELECT * FROM "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE" WHERE "CiTUS.TEEN2"."CAPITAL_TABLE".i = "CiTuS.TeeN"."TeeNTabLE.1!?!"."TeNANt_Id" ORDER BY 1,2,3,4; id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 1 | 0 | 0 | 1 1 | 1 | 1 | 0 2 | 3 | 3 | 2 3 | 2 | 2 | 1 4 | 4 | 4 | 3 4 | 4 | 4 | 4 (7 rows) -- add group by, having, order by clauses SELECT * FROM "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE" WHERE "CiTUS.TEEN2"."CAPITAL_TABLE".i = "CiTuS.TeeN"."TeeNTabLE.1!?!"."TeNANt_Id" GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY "TeNANt_Id"; id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 (3 rows) SELECT * FROM "CiTuS.TeeN"."TeeNTabLE.1!?!" join "CiTUS.TEEN2"."CAPITAL_TABLE" on ("CiTUS.TEEN2"."CAPITAL_TABLE".i = "CiTuS.TeeN"."TeeNTabLE.1!?!"."TeNANt_Id") GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 (3 rows) -- run with CTEs WITH "cTE" AS ( SELECT * FROM "CiTuS.TeeN"."TeeNTabLE.1!?!" ) SELECT * FROM "cTE" join "CiTUS.TEEN2"."CAPITAL_TABLE" on ("cTE"."TeNANt_Id" = "CiTUS.TEEN2"."CAPITAL_TABLE".i) GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 (3 rows) SET search_path to "CiTuS.TeeN"; -- and subqueries SELECT * FROM ( SELECT * FROM "TeeNTabLE.1!?!" ) "cTE" join "CiTUS.TEEN2"."CAPITAL_TABLE" on ("cTE"."TeNANt_Id" = "CiTUS.TEEN2"."CAPITAL_TABLE".i) GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 4 | 4 | 4 | 4 (3 rows) SET search_path to default; -- Some DDL ALTER TABLE "CiTuS.TeeN"."TeeNTabLE.1!?!" ADD COLUMN "NEW_TeeN:COl" text; -- Some DML DELETE FROM "CiTuS.TeeN"."TeeNTabLE.1!?!" WHERE "TeNANt_Id"=1; -- Some more DDL ALTER TABLE "CiTuS.TeeN"."TeeNTabLE.1!?!" ADD CONSTRAINT "ConsNAmE<>" PRIMARY KEY ("TeNANt_Id"); -- test schema rename propagation CREATE SCHEMA foo; CREATE TABLE foo.test (x int, y int); SELECT create_distributed_table('foo.test', 'x'); create_distributed_table --------------------------------------------------------------------- (1 row) INSERT INTO foo.test VALUES (1, 1), (2, 2); ALTER SCHEMA foo rename to bar; SELECT COUNT(*) FROM bar.test; count --------------------------------------------------------------------- 2 (1 row) -- test propagation with weird name ALTER SCHEMA "CiTuS.TeeN" RENAME TO "Citus'Teen123"; SELECT * FROM "Citus'Teen123"."TeeNTabLE.1!?!" 
ORDER BY id; id | TeNANt_Id | NEW_TeeN:COl --------------------------------------------------------------------- 1 | 0 | 2 | 3 | 3 | 2 | 4 | 4 | (4 rows) -- test alter owner propagation CREATE ROLE test_non_super_user; ALTER ROLE test_non_super_user NOSUPERUSER; SELECT pg_get_userbyid(nspowner) AS schema_owner FROM pg_namespace WHERE nspname = 'bar'; schema_owner --------------------------------------------------------------------- postgres (1 row) ALTER SCHEMA bar OWNER TO test_non_super_user; select result from run_command_on_workers ($$ SELECT pg_get_userbyid(nspowner) AS schema_owner FROM pg_namespace WHERE nspname = 'bar' $$); result --------------------------------------------------------------------- test_non_super_user test_non_super_user (2 rows) ALTER SCHEMA bar OWNER TO postgres; DROP ROLE test_non_super_user; -- test error INSERT INTO bar.test VALUES (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9); BEGIN; SELECT COUNT(*) FROM bar.test; count --------------------------------------------------------------------- 9 (1 row) ALTER SCHEMA bar RENAME TO foo; ERROR: cannot run schema command because there was a parallel operation on a distributed table in the transaction DETAIL: When running command on/for a distributed schema, Citus needs to perform all operations over a single connection per node to ensure consistency. HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" ROLLBACK; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT COUNT(*) FROM bar.test; count --------------------------------------------------------------------- 9 (1 row) ALTER SCHEMA bar RENAME TO foo; ROLLBACK; -- below tests are to verify dependency propagation with nested sub-transactions -- TEST1 BEGIN; CREATE SCHEMA sc1; CREATE SEQUENCE sc1.seq; CREATE TABLE sc1.s1(id int default(nextval('sc1.seq'))); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to sequence sc1.seq drop cascades to table sc1.s1 -- TEST2 CREATE SCHEMA sc1; BEGIN; CREATE SEQUENCE sc1.seq1; CREATE TABLE sc1.s1(id int default(nextval('sc1.seq1'))); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to sequence sc1.seq1 drop cascades to table sc1.s1 -- TEST3 SET citus.enable_metadata_sync TO off; CREATE SCHEMA sc1; SET citus.enable_metadata_sync TO on; BEGIN; CREATE TABLE sc1.s1(id int); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 -- TEST4 BEGIN; SAVEPOINT sp1; CREATE SCHEMA sc1; ROLLBACK TO SAVEPOINT sp1; SET LOCAL citus.enable_metadata_sync TO off; CREATE SCHEMA sc1; SET LOCAL citus.enable_metadata_sync TO on; CREATE TABLE sc1.s1(id int); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 -- TEST5 BEGIN; SAVEPOINT sp1; CREATE SCHEMA sc1; RELEASE SAVEPOINT sp1; CREATE SEQUENCE seq1; CREATE TABLE sc1.s1(id int default(nextval('seq1'))); SELECT 
create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 DROP SEQUENCE seq1; -- TEST6 BEGIN; SAVEPOINT sp1; SAVEPOINT sp2; CREATE SCHEMA sc1; ROLLBACK TO SAVEPOINT sp2; RELEASE SAVEPOINT sp1; SET LOCAL citus.enable_metadata_sync TO off; CREATE SCHEMA sc1; SET LOCAL citus.enable_metadata_sync TO on; CREATE TABLE sc1.s1(id int); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 -- TEST7 BEGIN; SAVEPOINT sp1; SAVEPOINT sp2; CREATE SCHEMA sc1; RELEASE SAVEPOINT sp2; RELEASE SAVEPOINT sp1; CREATE SEQUENCE seq1; CREATE TABLE sc1.s1(id int default(nextval('seq1'))); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 DROP SEQUENCE seq1; -- TEST8 BEGIN; SAVEPOINT sp1; SAVEPOINT sp2; CREATE SCHEMA sc1; RELEASE SAVEPOINT sp2; ROLLBACK TO SAVEPOINT sp1; SET LOCAL citus.enable_metadata_sync TO off; CREATE SCHEMA sc1; SET LOCAL citus.enable_metadata_sync TO on; CREATE TABLE sc1.s1(id int); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 -- TEST9 BEGIN; SAVEPOINT sp1; SAVEPOINT sp2; CREATE SCHEMA sc2; ROLLBACK TO SAVEPOINT sp2; SAVEPOINT sp3; CREATE SCHEMA sc1; RELEASE SAVEPOINT sp3; RELEASE SAVEPOINT sp1; CREATE SEQUENCE seq1; CREATE TABLE sc1.s1(id int default(nextval('seq1'))); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 DROP SEQUENCE seq1; -- TEST10 BEGIN; SAVEPOINT sp1; SAVEPOINT sp2; CREATE SCHEMA sc2; RELEASE SAVEPOINT sp2; SAVEPOINT sp3; CREATE SCHEMA sc3; SAVEPOINT sp4; CREATE SCHEMA sc1; ROLLBACK TO SAVEPOINT sp4; RELEASE SAVEPOINT sp3; RELEASE SAVEPOINT sp1; SET LOCAL citus.enable_metadata_sync TO off; CREATE SCHEMA sc1; SET LOCAL citus.enable_metadata_sync TO on; CREATE TABLE sc1.s1(id int); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 DROP SCHEMA sc2 CASCADE; DROP SCHEMA sc3 CASCADE; -- TEST11 BEGIN; SAVEPOINT sp1; SAVEPOINT sp2; CREATE SCHEMA sc2; RELEASE SAVEPOINT sp2; SAVEPOINT sp3; CREATE SCHEMA sc3; SAVEPOINT sp4; CREATE SCHEMA sc1; RELEASE SAVEPOINT sp4; RELEASE SAVEPOINT sp3; RELEASE SAVEPOINT sp1; CREATE SEQUENCE seq1; CREATE TABLE sc1.s1(id int default(nextval('seq1'))); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 DROP SCHEMA sc2 CASCADE; DROP SCHEMA sc3 CASCADE; DROP SEQUENCE seq1; -- TEST12 BEGIN; SAVEPOINT sp1; SAVEPOINT sp2; CREATE SCHEMA sc2; RELEASE SAVEPOINT sp2; SAVEPOINT sp3; CREATE SCHEMA sc3; SAVEPOINT sp4; CREATE SEQUENCE seq1; CREATE SCHEMA sc1; CREATE TABLE sc1.s1(id int 
default(nextval('seq1'))); SELECT create_distributed_table('sc1.s1','id'); create_distributed_table --------------------------------------------------------------------- (1 row) RELEASE SAVEPOINT sp4; RELEASE SAVEPOINT sp3; RELEASE SAVEPOINT sp1; COMMIT; DROP SCHEMA sc1 CASCADE; NOTICE: drop cascades to table sc1.s1 DROP SCHEMA sc2 CASCADE; DROP SCHEMA sc3 CASCADE; DROP SEQUENCE seq1; -- issue-6614 CREATE FUNCTION create_schema_test() RETURNS void AS $$ BEGIN SET citus.create_object_propagation = 'deferred'; CREATE SCHEMA test_1; CREATE TABLE test_1.test ( id bigserial constraint test_pk primary key, creation_date timestamp constraint test_creation_date_df default timezone('UTC'::text, CURRENT_TIMESTAMP) not null ); PERFORM create_reference_table('test_1.test'); RETURN; END; $$ LANGUAGE plpgsql; SELECT create_schema_test(); create_schema_test --------------------------------------------------------------------- (1 row) SELECT result FROM run_command_on_all_nodes($$ SELECT COUNT(*) = 1 FROM pg_dist_partition WHERE logicalrelid = 'test_1.test'::regclass $$); result --------------------------------------------------------------------- t t t (3 rows) DROP FUNCTION create_schema_test; DROP SCHEMA test_1 CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table test_1.test drop cascades to table test_1.test_1197064 -- Clean up the created schema SET client_min_messages TO WARNING; SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); pg_identify_object_as_address --------------------------------------------------------------------- (schema,{run_test_schema},{}) (1 row) DROP TABLE public.nation_local; DROP SCHEMA run_test_schema, test_schema_support_join_1, test_schema_support_join_2, "Citus'Teen123", "CiTUS.TEEN2", bar, test_schema_support CASCADE; -- verify that the dropped schema is removed from worker's pg_dist_object SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); pg_identify_object_as_address --------------------------------------------------------------------- (0 rows)
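-- Illustrative sketch only (not executed as part of this test, so no expected
-- output is recorded): the same pg_dist_object cleanup check could be issued on
-- every node through run_command_on_all_nodes, which this file already uses
-- above for the pg_dist_partition check, e.g.:
--   SELECT result FROM run_command_on_all_nodes($$
--     SELECT count(*) FROM pg_catalog.pg_dist_object
--     WHERE classid = 2615
--       AND objid IN (SELECT oid FROM pg_namespace WHERE nspname = 'run_test_schema')
--   $$);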