diff --git a/src/test/regress/expected/alter_index.out b/src/test/regress/expected/alter_index.out
index 463f44ee7..4d4a725b3 100644
--- a/src/test/regress/expected/alter_index.out
+++ b/src/test/regress/expected/alter_index.out
@@ -47,7 +47,7 @@ ORDER BY c.relname, a.attnum;
 \c - - - :worker_1_port
 SELECT c.relname, a.attstattarget FROM pg_attribute a
-JOIN pg_class c ON a.attrelid = c.oid AND c.relname LIKE 'test\_idx%'
+JOIN pg_class c ON a.attrelid = c.oid AND c.relname SIMILAR TO 'test\_idx%\_\d%'
 ORDER BY c.relname, a.attnum;
  relname | attstattarget
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/auto_undist_citus_local.out b/src/test/regress/expected/auto_undist_citus_local.out
index 73e900a6c..8051f8a8a 100644
--- a/src/test/regress/expected/auto_undist_citus_local.out
+++ b/src/test/regress/expected/auto_undist_citus_local.out
@@ -5,6 +5,7 @@ CREATE SCHEMA drop_fkey_cascade;
 SET search_path TO drop_fkey_cascade;
 SET client_min_messages TO WARNING;
 SET citus.next_shard_id TO 1810000;
+SET citus.next_placement_id TO 3070000;
 SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
  ?column?
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/citus_local_tables.out b/src/test/regress/expected/citus_local_tables.out
index 4ad0ade5f..35cdc56fd 100644
--- a/src/test/regress/expected/citus_local_tables.out
+++ b/src/test/regress/expected/citus_local_tables.out
@@ -948,7 +948,7 @@ select count(*) from pg_constraint where conname = 'fkey_test_drop';
 select inhrelid::regclass from pg_inherits where (select inhparent::regclass::text) ~ '^parent_1_\d{7}$' order by 1;
  inhrelid
 ---------------------------------------------------------------------
- parent_1_child_1_1190084
+ parent_1_child_1_1190046
 (1 row)

 -- check the shell partition
diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out
index 1b5cf7051..e11bda9c6 100644
--- a/src/test/regress/expected/coordinator_shouldhaveshards.out
+++ b/src/test/regress/expected/coordinator_shouldhaveshards.out
@@ -2,6 +2,7 @@ CREATE SCHEMA coordinator_shouldhaveshards;
 SET search_path TO coordinator_shouldhaveshards;
 SET citus.next_shard_id TO 1503000;
+SET citus.next_placement_id TO 1503000;
 -- idempotently add node to allow this test to run without add_coordinator
 SET client_min_messages TO WARNING;
 SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
diff --git a/src/test/regress/expected/create_citus_local_table_cascade.out b/src/test/regress/expected/create_citus_local_table_cascade.out
index fcf3f4c0a..ab84cc251 100644
--- a/src/test/regress/expected/create_citus_local_table_cascade.out
+++ b/src/test/regress/expected/create_citus_local_table_cascade.out
@@ -1,5 +1,6 @@
 \set VERBOSITY terse
 SET citus.next_shard_id TO 1516000;
+SET citus.next_placement_id TO 1516000;
 SET citus.shard_replication_factor TO 1;
 CREATE SCHEMA citus_add_local_table_to_metadata_cascade;
 SET search_path TO citus_add_local_table_to_metadata_cascade;
diff --git a/src/test/regress/expected/create_ref_dist_from_citus_local.out b/src/test/regress/expected/create_ref_dist_from_citus_local.out
index a4eceb8e1..7f6821b1f 100644
--- a/src/test/regress/expected/create_ref_dist_from_citus_local.out
+++ b/src/test/regress/expected/create_ref_dist_from_citus_local.out
@@ -1,5 +1,6 @@
 \set VERBOSITY terse
 SET citus.next_shard_id TO 1800000;
+SET citus.next_placement_id TO 8500000;
 SET citus.shard_replication_factor TO 1;
 CREATE SCHEMA create_ref_dist_from_citus_local;
 SET search_path TO create_ref_dist_from_citus_local;
diff --git a/src/test/regress/expected/drop_partitioned_table.out b/src/test/regress/expected/drop_partitioned_table.out
index 9b4a94e89..17ebb5e97 100644
--- a/src/test/regress/expected/drop_partitioned_table.out
+++ b/src/test/regress/expected/drop_partitioned_table.out
@@ -62,7 +62,7 @@ WHERE n.nspname IN ('drop_partitioned_table', 'schema1')
 ORDER BY 1, 2;
 \c - - - :master_port
 SET search_path = drop_partitioned_table;
-SET citus.next_shard_id TO 720000;
+SET citus.next_shard_id TO 721000;
 -- CASE 1
 -- Dropping the parent table
 CREATE TABLE parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
@@ -103,7 +103,7 @@ SELECT * FROM drop_partitioned_table.tables_info;
 \c - - - :master_port
 SET search_path = drop_partitioned_table;
-SET citus.next_shard_id TO 720000;
+SET citus.next_shard_id TO 722000;
 -- CASE 2
 -- Dropping the parent table, but including children in the DROP command
 CREATE TABLE parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
@@ -144,7 +144,7 @@ SELECT * FROM drop_partitioned_table.tables_info;
 \c - - - :master_port
 SET search_path = drop_partitioned_table;
-SET citus.next_shard_id TO 720000;
+SET citus.next_shard_id TO 723000;
 -- CASE 3
 -- DROP OWNED BY role1; Only parent is owned by role1, children are owned by another owner
 SET client_min_messages TO warning;
@@ -200,7 +200,7 @@ SELECT * FROM drop_partitioned_table.tables_info;
 \c - - - :master_port
 SET search_path = drop_partitioned_table;
-SET citus.next_shard_id TO 720000;
+SET citus.next_shard_id TO 724000;
 -- CASE 4
 -- DROP OWNED BY role1; Parent and children are owned by role1
 GRANT ALL ON SCHEMA drop_partitioned_table TO role1;
@@ -244,7 +244,7 @@ SELECT * FROM drop_partitioned_table.tables_info;
 \c - - - :master_port
 SET search_path = drop_partitioned_table;
-SET citus.next_shard_id TO 720000;
+SET citus.next_shard_id TO 725000;
 REVOKE ALL ON SCHEMA drop_partitioned_table FROM role1;
 DROP ROLE role1;
 SELECT run_command_on_workers('DROP ROLE IF EXISTS role1');
@@ -296,7 +296,7 @@ SELECT * FROM drop_partitioned_table.tables_info;
 (0 rows)

 \c - - - :master_port
-SET citus.next_shard_id TO 720000;
+SET citus.next_shard_id TO 726000;
 -- CASE 6
 -- DROP SCHEMA schema1 CASCADE; Parent and children are in schema1
 CREATE SCHEMA schema1;
@@ -343,7 +343,7 @@ SET search_path = drop_partitioned_table;
 -- Check that we actually skip sending remote commands to skip shards
 SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
-SET citus.next_shard_id TO 720000;
+SET citus.next_shard_id TO 727000;
 DROP EVENT TRIGGER new_trigger_for_drops;
 -- Case 1 - we should skip
 CREATE TABLE parent (x text, t timestamptz DEFAULT now()) PARTITION BY RANGE (t);
@@ -359,18 +359,42 @@ BEGIN;
 SET citus.log_remote_commands TO on;
 DROP TABLE parent;
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+NOTICE: issuing ALTER TABLE IF EXISTS drop_partitioned_table.parent DETACH PARTITION drop_partitioned_table.child1;
+NOTICE: issuing ALTER TABLE IF EXISTS drop_partitioned_table.parent DETACH PARTITION drop_partitioned_table.child1;
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
+NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.parent')
+NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.parent')
 NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE
+NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
+NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 ROLLBACK;
 NOTICE: issuing ROLLBACK
+NOTICE: issuing ROLLBACK
 -- Case 2 - we shouldn't skip
 BEGIN;
 SET citus.log_remote_commands TO on;
 DROP TABLE parent, child1;
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+NOTICE: issuing ALTER TABLE IF EXISTS drop_partitioned_table.parent DETACH PARTITION drop_partitioned_table.child1;
+NOTICE: issuing ALTER TABLE IF EXISTS drop_partitioned_table.parent DETACH PARTITION drop_partitioned_table.child1;
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
+NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.parent')
+NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.parent')
 NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE
+NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
+NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.child1_xxxxx CASCADE
 ROLLBACK;
 NOTICE: issuing ROLLBACK
+NOTICE: issuing ROLLBACK
 DROP SCHEMA drop_partitioned_table CASCADE;
 NOTICE: drop cascades to 3 other objects
 SELECT run_command_on_workers('DROP SCHEMA IF EXISTS drop_partitioned_table CASCADE');
diff --git a/src/test/regress/expected/fkeys_between_local_ref.out b/src/test/regress/expected/fkeys_between_local_ref.out
index 49fc9d100..b142bd4d3 100644
--- a/src/test/regress/expected/fkeys_between_local_ref.out
+++ b/src/test/regress/expected/fkeys_between_local_ref.out
@@ -1,5 +1,6 @@
 \set VERBOSITY terse
 SET citus.next_shard_id TO 1518000;
+SET citus.next_placement_id TO 4090000;
 SET citus.shard_replication_factor TO 1;
 CREATE SCHEMA fkeys_between_local_ref;
 SET search_path TO fkeys_between_local_ref;
diff --git a/src/test/regress/expected/foreign_key_to_reference_table.out b/src/test/regress/expected/foreign_key_to_reference_table.out
index 927964bbb..dd110349e 100644
--- a/src/test/regress/expected/foreign_key_to_reference_table.out
+++ b/src/test/regress/expected/foreign_key_to_reference_table.out
@@ -144,7 +144,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id');
 (1 row)

 ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET NULL;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -158,7 +158,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id');
 (1 row)

-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -173,7 +173,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id');
 (1 row)

 ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -189,7 +189,7 @@ BEGIN;
 (1 row)

 COMMIT;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -204,7 +204,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id');
 (1 row)

 ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET NULL;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -219,7 +219,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id');
 (1 row)

 ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET DEFAULT;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -234,7 +234,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id');
 (1 row)

 ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE CASCADE;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -253,7 +253,7 @@ ALTER TABLE referencing_table ADD COLUMN referencing int REFERENCES referenced_t
 ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints
 DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names
 HINT: You can issue each command separately such as ALTER TABLE referencing_table ADD COLUMN referencing data_type; ALTER TABLE referencing_table ADD CONSTRAINT constraint_name FOREIGN KEY (referencing) REFERENCES referenced_table(id) ON UPDATE CASCADE;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -273,7 +273,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCE
 ERROR: cannot create foreign key constraint
 DETAIL: Citus currently supports foreign key constraints only for "citus.shard_replication_factor = 1".
 HINT: Please change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us.
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -292,7 +292,7 @@ ALTER TABLE referencing_table ADD COLUMN referencing_col int REFERENCES referenc
 ERROR: cannot create foreign key constraint
 DETAIL: Citus currently supports foreign key constraints only for "citus.shard_replication_factor = 1".
 HINT: Please change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us.
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -308,7 +308,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id');
 (1 row)

-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -332,7 +332,7 @@ BEGIN;
 (1 row)

 COMMIT;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -716,7 +716,7 @@ SELECT create_distributed_table('referencing_table', 'id');
 ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE;
 ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
    16
@@ -783,7 +783,7 @@ SELECT create_distributed_table('referencing_table', 'id');
 (1 row)

-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
    16
@@ -825,7 +825,7 @@ BEGIN;
 ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE;
 ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (ref_id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE;
 COMMIT;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
    16
@@ -892,7 +892,7 @@ BEGIN;
 ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (ref_id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE;
 COMMIT;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
    16
@@ -937,7 +937,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCE
 ALTER TABLE referencing_table2 ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column2) ON DELETE CASCADE;
 ALTER TABLE referencing_table2 ADD CONSTRAINT fkey_ref_to_dist FOREIGN KEY (id) REFERENCES referencing_table(id) ON DELETE CASCADE;
 COMMIT;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
    24
@@ -989,7 +989,7 @@ SELECT create_reference_table('referenced_table');
 (1 row)

-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -1025,7 +1025,7 @@ SELECT create_distributed_table('referencing_referencing_table', 'id');
 (1 row)

 ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id, ref_id2) REFERENCES referenced_table(test_column, test_column2) ON DELETE CASCADE;
-SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.referencing%';
+SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.referencing%\d{2,}';
  count
 ---------------------------------------------------------------------
    16
@@ -1117,14 +1117,14 @@ SELECT create_distributed_table('test_table_2', 'id');
 (1 row)

-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
 (1 row)

 ALTER TABLE test_table_2 DROP CONSTRAINT test_table_2_value_1_fkey;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -1154,7 +1154,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m
 ALTER TABLE test_table_2 DROP CONSTRAINT test_table_2_value_1_fkey;
 ERROR: current transaction is aborted, commands ignored until end of transaction block
 COMMIT;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -1178,7 +1178,7 @@ SELECT create_distributed_table('test_table_2', 'id');
 (1 row)

 ALTER TABLE test_table_1 DROP CONSTRAINT test_table_1_pkey CASCADE;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -1203,7 +1203,7 @@ BEGIN;
 ALTER TABLE test_table_1 DROP CONSTRAINT test_table_1_pkey CASCADE;
 COMMIT;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -1226,7 +1226,7 @@ SELECT create_distributed_table('test_table_2', 'id');
 (1 row)

 ALTER TABLE test_table_2 DROP COLUMN value_1;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -1251,7 +1251,7 @@ BEGIN;
 ALTER TABLE test_table_2 DROP COLUMN value_1;
 COMMIT;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -1274,7 +1274,7 @@ SELECT create_distributed_table('test_table_2', 'id');
 (1 row)

 ALTER TABLE test_table_1 DROP COLUMN id CASCADE;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -1299,7 +1299,7 @@ BEGIN;
 ALTER TABLE test_table_1 DROP COLUMN id CASCADE;
 COMMIT;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
@@ -1332,7 +1332,7 @@ INSERT INTO test_table_2 VALUES (4,2147483648);
 ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE int;
 ERROR: integer out of range
 CONTEXT: while executing command on localhost:xxxxx
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     8
@@ -1359,7 +1359,7 @@ BEGIN;
 ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE bigint;
 ALTER TABLE test_table_1 DROP COLUMN id CASCADE;
 COMMIT;
-SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
+SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
  count
 ---------------------------------------------------------------------
     0
diff --git a/src/test/regress/expected/local_shard_utility_command_execution.out b/src/test/regress/expected/local_shard_utility_command_execution.out
index 0e846f4a0..45c9dbb68 100644
--- a/src/test/regress/expected/local_shard_utility_command_execution.out
+++ b/src/test/regress/expected/local_shard_utility_command_execution.out
@@ -5,6 +5,7 @@
 -- (shouldHaveShards = on) and having reference table placements in it.
 \set VERBOSITY terse
 SET citus.next_shard_id TO 1500000;
+SET citus.next_placement_id TO 8300000;
 SET citus.shard_replication_factor TO 1;
 SET citus.enable_local_execution TO ON;
 SET citus.shard_COUNT TO 32;
diff --git a/src/test/regress/expected/master_copy_shard_placement.out b/src/test/regress/expected/master_copy_shard_placement.out
index aebc96922..4b5ee8ae7 100644
--- a/src/test/regress/expected/master_copy_shard_placement.out
+++ b/src/test/regress/expected/master_copy_shard_placement.out
@@ -130,12 +130,5 @@ SELECT master_copy_shard_placement(
   do_repair := false,
   transfer_mode := 'block_writes');
 ERROR: Table 'mx_table' is streaming replicated. Shards of streaming replicated tables cannot be copied
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
 SET client_min_messages TO ERROR;
 DROP SCHEMA mcsp CASCADE;
diff --git a/src/test/regress/expected/multi_colocation_utils.out b/src/test/regress/expected/multi_colocation_utils.out
index 886638b3c..887387b33 100644
--- a/src/test/regress/expected/multi_colocation_utils.out
+++ b/src/test/regress/expected/multi_colocation_utils.out
@@ -337,13 +337,13 @@ SELECT find_shard_interval_index(1300016);
 (1 row)

 -- check external colocation API
-SELECT count(*) FROM pg_dist_partition WHERE colocationid = 4;
+SELECT count(*) FROM pg_dist_partition WHERE colocationid IN (4, 5);
  count
 ---------------------------------------------------------------------
     0
 (1 row)

-DELETE FROM pg_dist_colocation WHERE colocationid = 4;
+DELETE FROM pg_dist_colocation WHERE colocationid IN (4, 5);
 SET citus.shard_count = 2;
 CREATE TABLE table1_groupA ( id int );
 SELECT create_distributed_table('table1_groupA', 'id');
diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out
index ba08130a6..466d39c31 100644
--- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out
+++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out
@@ -8,6 +8,14 @@ SET citus.next_shard_id TO 910000;
 SET citus.shard_replication_factor TO 1;
 CREATE SCHEMA fix_idx_names;
 SET search_path TO fix_idx_names, public;
+-- stop metadata sync for one of the worker nodes so we test both cases
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+NOTICE: dropping metadata on the node (localhost,57637)
+ stop_metadata_sync_to_node
+---------------------------------------------------------------------
+
+(1 row)
+
 -- NULL input should automatically return NULL since
 -- fix_partition_shard_index_names is strict
 -- same for worker_fix_partition_shard_index_names
@@ -68,7 +76,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 -- the names are generated correctly
 -- shard id has been appended to all index names which didn't end in shard id
 -- this goes in line with Citus's way of naming indexes of shards: always append shardid to the end
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
  tablename | indexname
 ---------------------------------------------------------------------
  dist_partitioned_table_910004 | short_910004
@@ -223,13 +231,6 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 \c - - - :master_port
 SET search_path TO fix_idx_names, public;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
 DROP INDEX short;
 DROP TABLE yet_another_partition_table, another_partition_table_with_very_long_name;
 -- this will create constraint1 index on parent
@@ -256,7 +257,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 \c - - - :worker_1_port
 -- index names end in shardid for partitions
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
  tablename | indexname
 ---------------------------------------------------------------------
  dist_partitioned_table_910004 | constraint1_910004
@@ -309,7 +310,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 \c - - - :worker_1_port
 -- index names are already correct, including inherited index for another_partition
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
  tablename | indexname
 ---------------------------------------------------------------------
  another_partition_361176 | another_partition_dist_col_idx_361176
@@ -348,7 +349,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 \c - - - :worker_1_port
 -- we have correct names
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
  tablename | indexname
 ---------------------------------------------------------------------
  dist_partitioned_table_910004 | expression_index_910004
@@ -388,7 +389,7 @@ SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
 (1 row)

 \c - - - :worker_1_port
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
  tablename | indexname
 ---------------------------------------------------------------------
  dist_partitioned_table_910004 | expression_index_910004
@@ -427,7 +428,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 -- index names are correct
 -- shard id has been appended to all index names which didn't end in shard id
 -- this goes in line with Citus's way of naming indexes of shards: always append shardid to the end
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
  tablename | indexname
 ---------------------------------------------------------------------
  dist_partitioned_table_910050 | index_rep_factor_2_910050
@@ -481,7 +482,7 @@ DROP INDEX p_another_col_partition_col_idx;
 \c - - - :worker_1_port
 -- check that indexes have been renamed
 -- and that index on p has been dropped (it won't appear)
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
  tablename | indexname
 ---------------------------------------------------------------------
  dist_partitioned_table_910030 | short_910030
@@ -547,35 +548,89 @@ SET citus.log_remote_commands TO ON;
 CREATE INDEX i4 ON parent_table(dist_col);
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET search_path TO fix_idx_names,public;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET search_path TO fix_idx_names,public;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing CREATE INDEX i4 ON parent_table(dist_col);
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing CREATE INDEX i4 ON parent_table(dist_col);
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing CREATE INDEX i4_915000 ON fix_idx_names.parent_table_915000 USING btree (dist_col )
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_idx3_915001')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 -- only fix the index backing the pkey
 ALTER TABLE parent_table ADD CONSTRAINT pkey_cst PRIMARY KEY (dist_col, partition_col);
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET search_path TO fix_idx_names,public;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET search_path TO fix_idx_names,public;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER TABLE parent_table ADD CONSTRAINT pkey_cst PRIMARY KEY (dist_col, partition_col);
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER TABLE parent_table ADD CONSTRAINT pkey_cst PRIMARY KEY (dist_col, partition_col);
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_apply_shard_ddl_command (915000, 'fix_idx_names', 'ALTER TABLE parent_table ADD CONSTRAINT pkey_cst PRIMARY KEY (dist_col, partition_col);')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_pkey_915001')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 ALTER TABLE parent_table ADD CONSTRAINT unique_cst UNIQUE (dist_col, partition_col);
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET search_path TO fix_idx_names,public;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET search_path TO fix_idx_names,public;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER TABLE parent_table ADD CONSTRAINT unique_cst UNIQUE (dist_col, partition_col);
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER TABLE parent_table ADD CONSTRAINT unique_cst UNIQUE (dist_col, partition_col);
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_apply_shard_ddl_command (915000, 'fix_idx_names', 'ALTER TABLE parent_table ADD CONSTRAINT unique_cst UNIQUE (dist_col, partition_col);')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.unique_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_partition_col_key_915001')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 RESET citus.log_remote_commands;
@@ -597,6 +652,52 @@ NOTICE: issuing SELECT worker_apply_shard_ddl_command (915002, 'fix_idx_names',
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing CREATE TABLE fix_idx_names.p2 (dist_col integer NOT NULL, another_col integer, partition_col timestamp without time zone NOT NULL, name text) USING columnar
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing CREATE TABLE fix_idx_names.p2 (dist_col integer NOT NULL, another_col integer, partition_col timestamp without time zone NOT NULL, name text) USING columnar
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT alter_columnar_table_set('fix_idx_names.p2', chunk_group_row_limit => 10000, stripe_row_limit => 150000, compression_level => 3, compression => 'zstd');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT alter_columnar_table_set('fix_idx_names.p2', chunk_group_row_limit => 10000, stripe_row_limit => 150000, compression_level => 3, compression => 'zstd');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER TABLE fix_idx_names.p2 OWNER TO postgres
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER TABLE fix_idx_names.p2 OWNER TO postgres
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370000, 's')
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370000, 's')
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2')
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2')
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (915002, 1, 0, 1, 3380)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (915002, 1, 0, 1, 3380)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET search_path TO fix_idx_names,public;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SET search_path TO fix_idx_names,public;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER TABLE parent_table ATTACH PARTITION p2 FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER TABLE parent_table ATTACH PARTITION p2 FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_apply_inter_shard_ddl_command (915000, 'fix_idx_names', 915002, 'fix_idx_names', 'ALTER TABLE parent_table ATTACH PARTITION p2 FOR VALUES FROM (''2019-01-01'') TO (''2020-01-01'');')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i1_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx_915002')
@@ -613,6 +714,10 @@ NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.un
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 RESET citus.log_remote_commands;
diff --git a/src/test/regress/expected/multi_function_evaluation.out b/src/test/regress/expected/multi_function_evaluation.out
index 1f921b3d2..1b2531060 100644
--- a/src/test/regress/expected/multi_function_evaluation.out
+++ b/src/test/regress/expected/multi_function_evaluation.out
@@ -10,19 +10,15 @@ SET citus.next_shard_id TO 1200000;
 SET citus.enable_fast_path_router_planner TO false;
 -- nextval() works (no good way to test DEFAULT, or, by extension, SERIAL)
 CREATE TABLE example (key INT, value INT);
-SELECT master_create_distributed_table('example', 'key', 'hash');
- master_create_distributed_table
+SET citus.shard_replication_factor TO 2;
+SELECT create_distributed_table('example', 'key', shard_count:=1);
+ create_distributed_table
 ---------------------------------------------------------------------

 (1 row)

+RESET citus.shard_replication_factor;
 CREATE SEQUENCE example_value_seq;
-SELECT master_create_worker_shards('example', 1, 2);
- master_create_worker_shards
----------------------------------------------------------------------
-
-(1 row)
-
 INSERT INTO example VALUES (1, nextval('example_value_seq'));
 SELECT * FROM example;
  key | value
diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out
index 52125cc65..875eb3634 100644
--- a/src/test/regress/expected/multi_index_statements.out
+++ b/src/test/regress/expected/multi_index_statements.out
@@ -155,25 +155,25 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t
 (19 rows)

 \c - - - :worker_1_port
-SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1);
+SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_%' ORDER BY relname LIMIT 1);
  count
 ---------------------------------------------------------------------
     9
 (1 row)

-SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
+SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash_%';
  count
 ---------------------------------------------------------------------
    56
 (1 row)

-SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%';
+SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range_%';
  count
 ---------------------------------------------------------------------
     6
 (1 row)

-SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
+SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append_%';
  count
 ---------------------------------------------------------------------
     0
@@ -522,7 +522,7 @@ SELECT count(*) FROM pg_inherits WHERE inhrelid::regclass::text = 'child_index'
 \c - - - :worker_1_port
 SET search_path TO multi_index_statements;
 -- show that child indices of partition shards also inherit from parent indices of parent shards
-SELECT count(*) FROM pg_inherits WHERE inhrelid::regclass::text LIKE 'child_index%' AND inhparent::regclass::text LIKE 'parent_index%';
+SELECT count(*) FROM pg_inherits WHERE inhrelid::regclass::text LIKE 'child_index\_%' AND inhparent::regclass::text LIKE 'parent_index\_%';
  count
 ---------------------------------------------------------------------
    16
diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out
index 7a1677992..1fa8a4816 100644
--- a/src/test/regress/expected/multi_metadata_sync.out
+++ b/src/test/regress/expected/multi_metadata_sync.out
@@ -272,13 +272,13 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
  5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
 (4 rows)

-SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
+SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
  logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
 ---------------------------------------------------------------------
  mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s | f
 (1 row)

-SELECT * FROM pg_dist_shard ORDER BY shardid;
+SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
  logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
 ---------------------------------------------------------------------
  mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737
@@ -291,7 +291,7 @@ SELECT * FROM pg_dist_shard ORDER BY shardid;
  mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647
 (8 rows)

-SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport;
  shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
  1310000 | 1 | 0 | localhost | 57637 | 100000
@@ -409,13 +409,13 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
  5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
 (4 rows)

-SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
+SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
  logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
 ---------------------------------------------------------------------
  mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s | f
 (1 row)

-SELECT * FROM pg_dist_shard ORDER BY shardid;
+SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
  logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
 ---------------------------------------------------------------------
  mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737
@@ -428,7 +428,7 @@ SELECT * FROM pg_dist_shard ORDER BY shardid;
  mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647
 (8 rows)

-SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport;
  shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
  1310000 | 1 | 0 | localhost | 57637 | 100000
@@ -730,12 +730,12 @@ ORDER BY
 \c - - - :worker_2_port
 \d mx_test_schema_1.mx_table_1
 \d mx_test_schema_2.mx_table_2
-SELECT * FROM pg_dist_partition;
+SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_test_schema%';
  logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
 ---------------------------------------------------------------------
 (0 rows)

-SELECT * FROM pg_dist_shard;
+SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_test_schema%';
  logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
 ---------------------------------------------------------------------
 (0 rows)
diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out
index acf958f33..226529040 100644
--- a/src/test/regress/expected/multi_modifying_xacts.out
+++ b/src/test/regress/expected/multi_modifying_xacts.out
@@ -12,30 +12,21 @@ CREATE TABLE labs (
   id bigint NOT NULL,
   name text NOT NULL
 );
-SELECT master_create_distributed_table('researchers', 'lab_id', 'hash');
- master_create_distributed_table
+SET citus.shard_replication_factor TO 2;
+SELECT create_distributed_table('researchers', 'lab_id', shard_count:=2);
+ create_distributed_table
 ---------------------------------------------------------------------

 (1 row)

-SELECT master_create_worker_shards('researchers', 2, 2);
- master_create_worker_shards
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT master_create_distributed_table('labs', 'id', 'hash');
- master_create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT master_create_worker_shards('labs', 1, 1);
- master_create_worker_shards
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('labs', 'id', shard_count:=1);
+ create_distributed_table
 ---------------------------------------------------------------------

 (1 row)

+RESET citus.shard_replication_factor;
 -- might be confusing to have two people in the same lab with the same name
 CREATE UNIQUE INDEX avoid_name_confusion_idx ON researchers (lab_id, name);
 -- add some data
@@ -1177,6 +1168,7 @@ SELECT create_distributed_table('numbers_hash_failure_test', 'key');
 -- ensure that the shard is created for this user
 \c - test_user - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt reference_failure_test_1200015
  List of relations
  Schema | Name | Type | Owner
diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out
index 5ebba9320..e15161fc9 100644
--- a/src/test/regress/expected/multi_multiuser.out
+++ b/src/test/regress/expected/multi_multiuser.out
@@ -566,22 +566,6 @@ SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE pron
  (localhost,57638,t,usage_access)
 (2 rows)

--- we don't want other tests to have metadata synced
--- that might change the test outputs, so we're just trying to be careful
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
 RESET ROLE;
 -- now we distribute the table as super user
 SELECT create_distributed_table('full_access_user_schema.t1', 'id');
diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out
index 3e433cb0b..33a7d8cdf 100644
--- a/src/test/regress/expected/multi_partitioning.out
+++ b/src/test/regress/expected/multi_partitioning.out
@@ -1940,19 +1940,27 @@ DROP TABLE partitioning_test_2008, partitioning_test_2009, partitioning_test_201
 -- verify this doesn't crash and gives a debug message for dropped table
 SET client_min_messages TO DEBUG1;
 DROP TABLE partitioning_test, reference_table;
+DEBUG: drop cascades to constraint partitioning_reference_fkey on table partitioning_test
+DETAIL: from localhost:xxxxx
+CONTEXT: SQL statement "SELECT master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
+PL/pgSQL function citus_drop_trigger() line XX at PERFORM
+DEBUG: drop cascades to constraint partitioning_reference_fkey on table partitioning_test
+DETAIL: from localhost:xxxxx
+CONTEXT: SQL statement "SELECT master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
+PL/pgSQL function citus_drop_trigger() line XX at PERFORM
 DEBUG: switching to sequential query execution mode
 DETAIL: Table "" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
 CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
 PL/pgSQL function citus_drop_trigger() line XX at PERFORM
 DEBUG: drop cascades to 2 other objects
-DETAIL: drop cascades to constraint partitioning_reference_fkey_1660302 on table partitioning_schema.partitioning_test_1660302
-drop cascades to constraint partitioning_reference_fkey_1660304 on table partitioning_schema.partitioning_test_1660304
+DETAIL: drop cascades to constraint partitioning_reference_fkey_1660302 on table partitioning_test_1660302
+drop cascades to constraint partitioning_reference_fkey_1660304 on table partitioning_test_1660304
 DETAIL: from localhost:xxxxx
 CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
 PL/pgSQL function citus_drop_trigger() line XX at PERFORM
 DEBUG: drop cascades to 2 other objects
-DETAIL: drop cascades to constraint partitioning_reference_fkey_1660303 on table partitioning_schema.partitioning_test_1660303
-drop cascades to constraint partitioning_reference_fkey_1660305 on table partitioning_schema.partitioning_test_1660305
+DETAIL: drop cascades to constraint partitioning_reference_fkey_1660303 on table partitioning_test_1660303
+drop cascades to constraint partitioning_reference_fkey_1660305 on table partitioning_test_1660305
 DETAIL: from localhost:xxxxx
 CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
 PL/pgSQL function citus_drop_trigger() line XX at PERFORM
@@ -3773,6 +3781,7 @@ NOTICE: Replicating reference table "supplier" to the node localhost:xxxxx
 NOTICE: Replicating reference table "users_ref_test_table" to the node localhost:xxxxx
 NOTICE: Replicating reference table "events_reference_table" to the node localhost:xxxxx
 NOTICE: Replicating reference table "users_reference_table" to the node localhost:xxxxx
+NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
  ?column?
 ---------------------------------------------------------------------
         1
@@ -4257,6 +4266,13 @@ ERROR: non_partitioned_table is not partitioned
 CONTEXT: PL/pgSQL function drop_old_time_partitions(regclass,timestamp with time zone) line XX at RAISE
 DROP TABLE non_partitioned_table;
 -- https://github.com/citusdata/citus/issues/4962
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+NOTICE: dropping metadata on the node (localhost,57637)
+ stop_metadata_sync_to_node
+---------------------------------------------------------------------
+
+(1 row)
+
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 361168;
 CREATE TABLE part_table_with_very_long_name (
@@ -4306,13 +4322,6 @@ WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%
 (6 rows)

 \c - - - :master_port
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
 DROP SCHEMA partitioning_schema CASCADE;
 NOTICE: drop cascades to 4 other objects
 DETAIL: drop cascades to table partitioning_schema."schema-test"
diff --git a/src/test/regress/expected/multi_reference_table.out b/src/test/regress/expected/multi_reference_table.out
index 3071f1455..114d2c0fc 100644
--- a/src/test/regress/expected/multi_reference_table.out
+++ b/src/test/regress/expected/multi_reference_table.out
@@ -1397,6 +1397,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_sche
 (0 rows)

 \c - - - :master_port
+SET citus.next_shard_id TO 1255000;
 -- now test the renaming of the table, and back to the expected name
 ALTER TABLE reference_schema.reference_table_ddl RENAME TO reference_table_ddl_test;
 ALTER TABLE reference_schema.reference_table_ddl_test RENAME TO reference_table_ddl;
diff --git a/src/test/regress/expected/multi_remove_node_reference_table.out b/src/test/regress/expected/multi_remove_node_reference_table.out
index 11f781f57..6b5c1b35d 100644
--- a/src/test/regress/expected/multi_remove_node_reference_table.out
+++ b/src/test/regress/expected/multi_remove_node_reference_table.out
@@ -10,13 +10,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1380000;
 -- create copy of pg_dist_shard_placement to reload after the test
 CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port;
 DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port;
--- make worker 1 receive metadata changes
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
- start_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
 -- remove non-existing node
 SELECT master_remove_node('localhost', 55555);
 ERROR: node at "localhost:xxxxx" does not exist
@@ -1036,13 +1029,6 @@ SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
 DROP TABLE remove_node_reference_table;
 DROP TABLE remove_node_reference_table_schema.table1;
 DROP SCHEMA remove_node_reference_table_schema CASCADE;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
 -- reload pg_dist_shard_placement table
 INSERT INTO pg_dist_shard_placement (SELECT * FROM tmp_shard_placement);
 DROP TABLE tmp_shard_placement;
diff --git
a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index 00be35167..b23c609ec 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -100,18 +100,14 @@ CREATE TABLE test_schema_support.nation_hash( n_regionkey integer not null, n_comment varchar(152) ); -SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash'); - master_create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 2); - master_create_worker_shards +SET citus.shard_replication_factor TO 2; +SELECT create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', shard_count:=4); + create_distributed_table --------------------------------------------------------------------- (1 row) +RESET citus.shard_replication_factor; -- test cursors SET search_path TO public; BEGIN; @@ -1045,6 +1041,7 @@ SELECT * FROM new_schema.table_set_schema; DROP SCHEMA new_schema CASCADE; NOTICE: drop cascades to table new_schema.table_set_schema +SET citus.next_shard_id TO 1195000; -- test ALTER TABLE SET SCHEMA when a search path is set CREATE SCHEMA old_schema; CREATE TABLE old_schema.table_set_schema(id int); @@ -1112,6 +1109,7 @@ SELECT * FROM new_schema.table_set_schema; --------------------------------------------------------------------- (0 rows) +SET citus.next_shard_id TO 1196000; SET search_path to public; DROP SCHEMA old_schema CASCADE; DROP SCHEMA new_schema CASCADE; @@ -1227,6 +1225,7 @@ SELECT create_reference_table('schema_with_user.test_table'); (1 row) \c - - - :master_port +SET citus.next_shard_id TO 1197000; -- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock DROP OWNED BY "test-user" CASCADE; NOTICE: drop cascades to table schema_with_user.test_table @@ -1441,17 +1440,11 @@ BEGIN; ALTER SCHEMA bar RENAME TO foo; ROLLBACK; -- Clean up the created schema +SET client_min_messages TO WARNING; DROP SCHEMA run_test_schema CASCADE; -NOTICE: drop cascades to table run_test_schema.test_table DROP SCHEMA test_schema_support_join_1 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table test_schema_support_join_1.nation_hash -drop cascades to table test_schema_support_join_1.nation_hash_2 DROP SCHEMA test_schema_support_join_2 CASCADE; -NOTICE: drop cascades to table test_schema_support_join_2.nation_hash DROP SCHEMA "Citus'Teen123" CASCADE; -NOTICE: drop cascades to table "Citus'Teen123"."TeeNTabLE.1!?!" DROP SCHEMA "CiTUS.TEEN2" CASCADE; -NOTICE: drop cascades to table "CiTUS.TEEN2"."CAPITAL_TABLE" DROP SCHEMA bar CASCADE; -NOTICE: drop cascades to table bar.test +DROP SCHEMA test_schema_support CASCADE; diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out index 4304df10c..5d59069a3 100644 --- a/src/test/regress/expected/multi_transactional_drop_shards.out +++ b/src/test/regress/expected/multi_transactional_drop_shards.out @@ -3,6 +3,7 @@ -- -- Tests that check the metadata returned by the master node. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1410000; +ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 4230000; SET citus.shard_count TO 4; SET client_min_messages TO WARNING; -- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then ROLLBACK @@ -56,6 +57,7 @@ ORDER BY -- verify shards are not dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner @@ -98,6 +100,7 @@ ORDER BY -- verify shards are dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner @@ -150,6 +153,7 @@ ORDER BY -- verify shards are not dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner @@ -187,6 +191,7 @@ ORDER BY -- verify shards are dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner @@ -236,6 +241,7 @@ ORDER BY -- verify shards are not dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner @@ -277,6 +283,7 @@ ORDER BY -- verify shards are not dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner @@ -327,6 +334,7 @@ ORDER BY -- verify shards are not dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner @@ -378,7 +386,8 @@ ORDER BY -- verify shards are not dropped \c - - - :worker_1_port -\dt transactional_drop_reference* +SET citus.override_table_visibility TO false; +\dt transactional_drop_reference_* List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- @@ -414,6 +423,7 @@ ORDER BY -- verify shards are not dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner @@ -488,6 +498,7 @@ ORDER BY -- verify shards and sequence are not dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_serial_* List of relations Schema | Name | Type | Owner @@ -504,9 +515,10 @@ ORDER BY \ds transactional_drop_serial_column2_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- -(0 rows) + public | transactional_drop_serial_column2_seq | sequence | postgres +(1 row) \c - - - :master_port -- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then COMMIT @@ -540,6 +552,7 @@ ORDER BY -- verify shards and sequence are dropped \c - - - :worker_1_port +SET citus.override_table_visibility TO false; \dt transactional_drop_serial_* List of relations Schema | Name | Type | Owner @@ -656,7 +669,7 @@ ORDER BY \c - - - :master_port SET client_min_messages TO WARNING; -- try using the coordinator as a worker and then dropping the table -SELECT 1 FROM master_add_node('localhost', :master_port); +SELECT 1 FROM master_add_node('localhost', :master_port, groupid := 0); ?column? 
--------------------------------------------------------------------- 1 @@ -679,12 +692,6 @@ SELECT master_remove_node('localhost', :master_port); -- clean the workspace DROP TABLE transactional_drop_shards, transactional_drop_reference; -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -- test DROP TABLE as a non-superuser in a transaction block CREATE USER try_drop_table WITH LOGIN; SELECT run_command_on_workers('CREATE USER try_drop_table WITH LOGIN'); diff --git a/src/test/regress/expected/multi_unsupported_worker_operations.out b/src/test/regress/expected/multi_unsupported_worker_operations.out index 9317b0e9e..be8e64c7f 100644 --- a/src/test/regress/expected/multi_unsupported_worker_operations.out +++ b/src/test/regress/expected/multi_unsupported_worker_operations.out @@ -268,12 +268,12 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; (1 row) \c - - - :worker_2_port -SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition; +SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%'; worker_drop_distributed_table --------------------------------------------------------------------- (0 rows) -SELECT count(*) FROM pg_dist_partition; +SELECT count(*) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%'; count --------------------------------------------------------------------- 0 @@ -379,7 +379,7 @@ NOTICE: dropping metadata on the node (localhost,57637) \c - - - :worker_1_port DELETE FROM pg_dist_node; -SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition; +SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%'; worker_drop_distributed_table --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/mx_regular_user.out b/src/test/regress/expected/mx_regular_user.out index 9b7b16907..e57bf8a99 100644 --- a/src/test/regress/expected/mx_regular_user.out +++ b/src/test/regress/expected/mx_regular_user.out @@ -43,6 +43,7 @@ SELECT 1 FROM run_command_on_workers($$CREATE SCHEMA "Mx Super User";$$); (2 rows) SET citus.next_shard_id TO 2980000; +SET citus.next_placement_id TO 2980000; SET search_path TO "Mx Super User"; CREATE TABLE super_user_owned_regular_user_granted (a int PRIMARY KEY, b int); SELECT create_reference_table ('"Mx Super User".super_user_owned_regular_user_granted'); @@ -200,6 +201,7 @@ GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user; \c - regular_mx_user - :master_port SET search_path TO "Mx Regular User"; SET citus.next_shard_id TO 1560000; +SET citus.next_placement_id TO 1560000; -- make sure that we sync the metadata SET citus.shard_replication_factor TO 1; CREATE TABLE partitioned_table (long_column_names_1 int, long_column_names_2 int, long_column_names_3 int, long_column_names_4 int, long_column_names_5 int, long_column_names_6 int, long_column_names_7 int, long_column_names_8 int, long_column_names_9 int, long_column_names_10 int, long_column_names_11 timestamp) PARTITION BY RANGE (long_column_names_11); diff --git a/src/test/regress/expected/node_conninfo_reload.out b/src/test/regress/expected/node_conninfo_reload.out index f3c0f561d..932610bc9 100644 --- a/src/test/regress/expected/node_conninfo_reload.out +++ 
b/src/test/regress/expected/node_conninfo_reload.out @@ -4,6 +4,7 @@ SET search_path TO node_conninfo_reload; SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.force_max_query_parallelization TO ON; +SET citus.next_shard_id TO 278000; create table test(a int); select create_distributed_table('test', 'a'); create_distributed_table diff --git a/src/test/regress/expected/partitioning_issue_3970.out b/src/test/regress/expected/partitioning_issue_3970.out index 7072d019a..c93f6a437 100644 --- a/src/test/regress/expected/partitioning_issue_3970.out +++ b/src/test/regress/expected/partitioning_issue_3970.out @@ -69,7 +69,7 @@ ORDER BY 1,2,3; \c - - - :worker_1_port SELECT relname, conname, pg_catalog.pg_get_constraintdef(con.oid, true) FROM pg_constraint con JOIN pg_class rel ON (rel.oid=con.conrelid) -WHERE relname LIKE 'part_table%' +WHERE relname SIMILAR TO 'part_table%\_\d%' ORDER BY 1,2,3; relname | conname | pg_get_constraintdef --------------------------------------------------------------------- diff --git a/src/test/regress/expected/validate_constraint.out b/src/test/regress/expected/validate_constraint.out index a3f75a2c5..08b03a2bf 100644 --- a/src/test/regress/expected/validate_constraint.out +++ b/src/test/regress/expected/validate_constraint.out @@ -121,6 +121,8 @@ FROM constraint_validations_in_workers ORDER BY 1, 2; name | validated --------------------------------------------------------------------- + validatable_constraint | t + validatable_constraint | t validatable_constraint_8000009 | t validatable_constraint_8000010 | t validatable_constraint_8000011 | t @@ -129,7 +131,7 @@ ORDER BY 1, 2; validatable_constraint_8000014 | t validatable_constraint_8000015 | t validatable_constraint_8000016 | t -(8 rows) +(10 rows) DROP TABLE constrained_table; DROP TABLE referenced_table CASCADE; diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index f9cddb86b..e41f81779 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -559,19 +559,19 @@ CREATE TABLE logged_test(id int); ALTER TABLE logged_test SET UNLOGGED; SELECT create_distributed_table('logged_test', 'id'); \c - - - :worker_1_port -SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; +SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname; \c - - - :master_port -- verify SET LOGGED/UNLOGGED works after distributing the table ALTER TABLE logged_test SET LOGGED; SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; \c - - - :worker_1_port -SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; +SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname; \c - - - :master_port ALTER TABLE logged_test SET UNLOGGED; SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS 
logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; \c - - - :worker_1_port -SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; +SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname; \c - - - :master_port DROP TABLE logged_test; @@ -583,7 +583,7 @@ SELECT create_distributed_table('hash_dist','id'); SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist'; \c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist%' ORDER BY relname; +SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist_%' ORDER BY relname; \c - - - :master_port -- verify that we can set and reset index storage parameters @@ -591,14 +591,14 @@ ALTER INDEX hash_dist_pkey SET(fillfactor=40); SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; \c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey_%' ORDER BY relname; \c - - - :master_port ALTER INDEX hash_dist_pkey RESET(fillfactor); SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; \c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey_%' ORDER BY relname; \c - - - :master_port -- verify error message on ALTER INDEX, SET TABLESPACE is unsupported @@ -611,7 +611,7 @@ CREATE UNIQUE INDEX another_index ON hash_dist(id) WITH (fillfactor=50); SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index'; \c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index%' ORDER BY relname; +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index_%' ORDER BY relname; \c - - - :master_port -- get rid of the index diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 48f4c92e3..d41923a18 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -21,7 +21,6 @@ test: single_node test: single_node_truncate test: turn_mx_on test: multi_cluster_management -test: turn_mx_off # below tests are placed right after multi_cluster_management as we do # remove/add node operations and we do not want any preexisting objects @@ -33,15 +32,33 @@ test: alter_database_owner test: multi_test_helpers multi_test_helpers_superuser test: multi_test_catalog_views +test: check_mx +test: turn_mx_off test: multi_table_ddl test: multi_sequence_default test: multi_name_lengths +test: turn_mx_on test: multi_name_resolution test: multi_metadata_access test: multi_metadata_attributes test: multi_read_from_secondaries +# ---------- +# multi_citus_tools tests utility functions written for citus tools +# ---------- +test: check_mx +test: turn_mx_off +test: multi_citus_tools + +# ---------- +# multi_replicate_reference_table tests replicating reference tables to new nodes after we add new nodes +# multi_remove_node_reference_table tests metadata changes after master_remove_node +# ---------- +test: multi_replicate_reference_table +test: turn_mx_on +test: multi_remove_node_reference_table 
+ # ---------- # The following distributed tests depend on creating a partitioned table and # uploading data to it. @@ -58,6 +75,7 @@ test: multi_row_insert insert_select_into_local_table multi_create_table_new_fea # following should not run in parallel because it relies on connection counts to workers test: insert_select_connection_leak +test: check_mx # --------- # at the end of the regression tests regarding recursively planned modifications # ensure that we don't leak any intermediate results @@ -68,7 +86,8 @@ test: ensure_no_intermediate_data_leak # ---------- # Tests for partitioning support # ---------- -test: multi_partitioning_utils multi_partitioning partitioning_issue_3970 replicated_partitioned_table +test: multi_partitioning_utils replicated_partitioned_table +test: multi_partitioning partitioning_issue_3970 test: drop_partitioned_table test: multi_fix_partition_shard_index_names test: partition_wise_join @@ -83,8 +102,11 @@ test: multi_create_fdw # ---------- # Tests for statistics propagation # ---------- +test: check_mx +test: turn_mx_off test: propagate_statistics test: pg13_propagate_statistics +test: turn_mx_on # ---------- # Test for updating table statistics @@ -130,7 +152,6 @@ test: with_executors with_join with_partitioning with_transactions with_dml # ---------- # Tests around DDL statements run on distributed tables # ---------- -test: multi_index_statements test: multi_alter_table_statements test: multi_alter_table_add_constraints @@ -162,16 +183,24 @@ test: multi_outer_join test: multi_complex_count_distinct multi_select_distinct test: multi_modifications test: multi_distribution_metadata -test: multi_generate_ddl_commands multi_create_shards multi_prune_shard_list multi_repair_shards +test: multi_prune_shard_list test: multi_upsert multi_simple_queries multi_data_types test: master_copy_shard_placement # multi_utilities cannot be run in parallel with other tests because it checks # global locks test: multi_utilities test: foreign_key_to_reference_table validate_constraint -test: multi_modifying_xacts test: multi_repartition_udt multi_repartitioned_subquery_udf multi_subtransactions + +test: multi_modifying_xacts +test: check_mx +test: turn_mx_off +test: multi_index_statements +test: multi_generate_ddl_commands multi_repair_shards +test: multi_create_shards test: multi_transaction_recovery +test: multi_copy +test: turn_mx_on test: local_dist_join_modifications test: local_table_join @@ -182,7 +211,7 @@ test: citus_local_dist_joins # multi_copy creates hash and range-partitioned tables and performs COPY # multi_router_planner creates hash partitioned tables. # --------- -test: multi_copy fast_path_router_modify pg_dump +test: fast_path_router_modify pg_dump test: multi_router_planner # These 2 tests have prepared statements which sometimes get invalidated by concurrent tests, # changing the debug output. We should not run them in parallel with others @@ -194,6 +223,8 @@ test: multi_router_planner_fast_path # ---------- test: multi_large_shardid +test: check_mx +test: turn_mx_off # ---------- # multi_size_queries tests various size commands on distributed tables # ---------- @@ -210,6 +241,7 @@ test: multi_drop_extension # ---------- test: multi_metadata_sync test: multi_unsupported_worker_operations +test: turn_mx_on # ---------- # grant_on_schema_propagation tests if the GRANT ... 
ON SCHEMA queries are propagated correctly @@ -221,6 +253,8 @@ test: multi_schema_support # ---------- # multi_function_evaluation tests edge-cases in master-side function pre-evaluation # ---------- +test: check_mx +test: turn_mx_off test: multi_function_evaluation # ---------- @@ -233,13 +267,9 @@ test: multi_truncate # multi_colocated_shard_transfer tests master_copy_shard_placement with colocated tables. # ---------- test: multi_colocation_utils +test: turn_mx_on test: multi_colocated_shard_transfer -# ---------- -# multi_citus_tools tests utility functions written for citus tools -# ---------- -test: multi_citus_tools - # ---------- # node_conninfo_reload tests that node_conninfo changes take effect # ---------- @@ -251,32 +281,32 @@ test: node_conninfo_reload test: multi_foreign_key test: multi_foreign_key_relation_graph -# ---------- -# multi_replicate_reference_table tests replicating reference tables to new nodes after we add new nodes -# multi_remove_node_reference_table tests metadata changes after master_remove_node -# ---------- -test: multi_replicate_reference_table -test: multi_remove_node_reference_table - # -------- # Replicating reference tables to coordinator. Add coordinator to pg_dist_node # and rerun some of the tests. # -------- +test: check_mx +test: turn_mx_off test: add_coordinator -test: multi_reference_table citus_local_tables_queries test: foreign_key_to_reference_table -test: citus_local_table_triggers test: replicate_reference_tables_to_coordinator +test: citus_local_tables +test: multi_row_router_insert mixed_relkind_tests +test: turn_mx_on +test: multi_reference_table citus_local_tables_queries +test: citus_local_table_triggers test: coordinator_shouldhaveshards test: local_shard_utility_command_execution -test: citus_local_tables -test: multi_row_router_insert mixed_relkind_tests create_ref_dist_from_citus_local +test: create_ref_dist_from_citus_local test: undistribute_table_cascade test: create_citus_local_table_cascade test: fkeys_between_local_ref test: auto_undist_citus_local +test: check_mx +test: turn_mx_off test: mx_regular_user test: remove_coordinator +test: turn_mx_on # ---------- # multi_transactional_drop_shards tests for dropping shards using connection API @@ -308,9 +338,12 @@ test: ssl_by_default # object distribution tests # --------- test: distributed_types distributed_types_conflict disable_object_propagation distributed_types_xact_add_enum_value +test: check_mx +test: turn_mx_off test: distributed_functions distributed_functions_conflict test: distributed_collations distributed_collations_conflict test: distributed_procedure +test: turn_mx_on # --------- # deparsing logic tests @@ -339,3 +372,4 @@ test: ensure_no_intermediate_data_leak # -------- test: ensure_no_shared_connection_leak +test: check_mx diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 2e13902fb..cc6a2af86 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -30,7 +30,7 @@ test: ensure_no_intermediate_data_leak # ---------- # Tests for partitioning support # ---------- -test: multi_partitioning_utils replicated_partitioned_table +test: multi_partitioning_utils multi_partitioning replicated_partitioned_table # ---------- @@ -60,7 +60,6 @@ test: tableam drop_column_partitioned_table # mx-less tests test: check_mx test: turn_mx_off -test: multi_partitioning test: undistribute_table test: turn_mx_on diff --git a/src/test/regress/output/multi_alter_table_statements.source 
b/src/test/regress/output/multi_alter_table_statements.source index 576d540de..b84b81127 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -1074,7 +1074,7 @@ SELECT create_distributed_table('logged_test', 'id'); (1 row) \c - - - :worker_1_port -SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; +SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname; relname | logged_info --------------------+------------- logged_test_220022 | unlogged @@ -1093,7 +1093,7 @@ SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logg (1 row) \c - - - :worker_1_port -SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; +SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname; relname | logged_info --------------------+------------- logged_test_220022 | logged @@ -1111,7 +1111,7 @@ SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logg (1 row) \c - - - :worker_1_port -SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; +SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname; relname | logged_info --------------------+------------- logged_test_220022 | unlogged @@ -1138,7 +1138,7 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist'; (1 row) \c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist%' ORDER BY relname; +SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist_%' ORDER BY relname; relname | reloptions ------------------+----------------- hash_dist_220026 | {fillfactor=40} @@ -1157,7 +1157,7 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; (1 row) \c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey_%' ORDER BY relname; relname | reloptions -----------------------+----------------- hash_dist_pkey_220026 | {fillfactor=40} @@ -1175,7 +1175,7 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; (1 row) \c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey_%' ORDER BY relname; relname | reloptions -----------------------+------------ hash_dist_pkey_220026 | @@ -1199,7 +1199,7 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index'; (1 row) \c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index%' ORDER BY relname; +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index_%' ORDER BY relname; 
relname | reloptions ----------------------+----------------- another_index_220026 | {fillfactor=50} diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index f122d15df..2359b1089 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -772,7 +772,7 @@ SELECT shardid, shardstate, nodename, nodeport -- try to insert into a reference table copy should fail COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in -CONTEXT: COPY numbers_reference, line 1: "3,1" +CONTEXT: COPY numbers_reference, line 1: "3,1" -- verify shards for reference table are still valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) diff --git a/src/test/regress/sql/alter_index.sql b/src/test/regress/sql/alter_index.sql index 218de6efb..3531bad18 100644 --- a/src/test/regress/sql/alter_index.sql +++ b/src/test/regress/sql/alter_index.sql @@ -31,7 +31,7 @@ ORDER BY c.relname, a.attnum; \c - - - :worker_1_port SELECT c.relname, a.attstattarget FROM pg_attribute a -JOIN pg_class c ON a.attrelid = c.oid AND c.relname LIKE 'test\_idx%' +JOIN pg_class c ON a.attrelid = c.oid AND c.relname SIMILAR TO 'test\_idx%\_\d%' ORDER BY c.relname, a.attnum; \c - - - :master_port diff --git a/src/test/regress/sql/auto_undist_citus_local.sql b/src/test/regress/sql/auto_undist_citus_local.sql index a792b2daa..8fe3d642c 100644 --- a/src/test/regress/sql/auto_undist_citus_local.sql +++ b/src/test/regress/sql/auto_undist_citus_local.sql @@ -5,6 +5,7 @@ CREATE SCHEMA drop_fkey_cascade; SET search_path TO drop_fkey_cascade; SET client_min_messages TO WARNING; SET citus.next_shard_id TO 1810000; +SET citus.next_placement_id TO 3070000; SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); diff --git a/src/test/regress/sql/coordinator_shouldhaveshards.sql b/src/test/regress/sql/coordinator_shouldhaveshards.sql index 85604a366..159a42d6c 100644 --- a/src/test/regress/sql/coordinator_shouldhaveshards.sql +++ b/src/test/regress/sql/coordinator_shouldhaveshards.sql @@ -3,6 +3,7 @@ CREATE SCHEMA coordinator_shouldhaveshards; SET search_path TO coordinator_shouldhaveshards; SET citus.next_shard_id TO 1503000; +SET citus.next_placement_id TO 1503000; -- idempotently add node to allow this test to run without add_coordinator SET client_min_messages TO WARNING; diff --git a/src/test/regress/sql/create_citus_local_table_cascade.sql b/src/test/regress/sql/create_citus_local_table_cascade.sql index 738073bb3..677f8ac0d 100644 --- a/src/test/regress/sql/create_citus_local_table_cascade.sql +++ b/src/test/regress/sql/create_citus_local_table_cascade.sql @@ -1,6 +1,7 @@ \set VERBOSITY terse SET citus.next_shard_id TO 1516000; +SET citus.next_placement_id TO 1516000; SET citus.shard_replication_factor TO 1; CREATE SCHEMA citus_add_local_table_to_metadata_cascade; diff --git a/src/test/regress/sql/create_ref_dist_from_citus_local.sql b/src/test/regress/sql/create_ref_dist_from_citus_local.sql index 6d01aa47c..7693d3300 100644 --- a/src/test/regress/sql/create_ref_dist_from_citus_local.sql +++ b/src/test/regress/sql/create_ref_dist_from_citus_local.sql @@ -1,6 +1,7 @@ \set VERBOSITY terse SET citus.next_shard_id TO 1800000; +SET citus.next_placement_id TO 8500000; SET citus.shard_replication_factor TO 1; CREATE SCHEMA create_ref_dist_from_citus_local; diff 
--git a/src/test/regress/sql/drop_partitioned_table.sql b/src/test/regress/sql/drop_partitioned_table.sql index f80e1edec..e5eb7dc23 100644 --- a/src/test/regress/sql/drop_partitioned_table.sql +++ b/src/test/regress/sql/drop_partitioned_table.sql @@ -69,7 +69,7 @@ ORDER BY 1, 2; \c - - - :master_port SET search_path = drop_partitioned_table; -SET citus.next_shard_id TO 720000; +SET citus.next_shard_id TO 721000; -- CASE 1 -- Dropping the parent table @@ -91,7 +91,7 @@ SELECT * FROM drop_partitioned_table.tables_info; \c - - - :master_port SET search_path = drop_partitioned_table; -SET citus.next_shard_id TO 720000; +SET citus.next_shard_id TO 722000; -- CASE 2 -- Dropping the parent table, but including children in the DROP command @@ -113,7 +113,7 @@ SELECT * FROM drop_partitioned_table.tables_info; \c - - - :master_port SET search_path = drop_partitioned_table; -SET citus.next_shard_id TO 720000; +SET citus.next_shard_id TO 723000; -- CASE 3 -- DROP OWNED BY role1; Only parent is owned by role1, children are owned by another owner @@ -144,7 +144,7 @@ SELECT * FROM drop_partitioned_table.tables_info; \c - - - :master_port SET search_path = drop_partitioned_table; -SET citus.next_shard_id TO 720000; +SET citus.next_shard_id TO 724000; -- CASE 4 -- DROP OWNED BY role1; Parent and children are owned by role1 @@ -169,7 +169,7 @@ SELECT * FROM drop_partitioned_table.tables_info; \c - - - :master_port SET search_path = drop_partitioned_table; -SET citus.next_shard_id TO 720000; +SET citus.next_shard_id TO 725000; REVOKE ALL ON SCHEMA drop_partitioned_table FROM role1; DROP ROLE role1; SELECT run_command_on_workers('DROP ROLE IF EXISTS role1'); @@ -195,7 +195,7 @@ SET search_path = drop_partitioned_table, schema1; SELECT * FROM drop_partitioned_table.tables_info; \c - - - :master_port -SET citus.next_shard_id TO 720000; +SET citus.next_shard_id TO 726000; -- CASE 6 -- DROP SCHEMA schema1 CASCADE; Parent and children are in schema1 CREATE SCHEMA schema1; @@ -222,7 +222,7 @@ SET search_path = drop_partitioned_table; -- Check that we actually skip sending remote commands to skip shards SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; -SET citus.next_shard_id TO 720000; +SET citus.next_shard_id TO 727000; DROP EVENT TRIGGER new_trigger_for_drops; -- Case 1 - we should skip diff --git a/src/test/regress/sql/fkeys_between_local_ref.sql b/src/test/regress/sql/fkeys_between_local_ref.sql index 630cde968..6afce7f3f 100644 --- a/src/test/regress/sql/fkeys_between_local_ref.sql +++ b/src/test/regress/sql/fkeys_between_local_ref.sql @@ -1,6 +1,7 @@ \set VERBOSITY terse SET citus.next_shard_id TO 1518000; +SET citus.next_placement_id TO 4090000; SET citus.shard_replication_factor TO 1; CREATE SCHEMA fkeys_between_local_ref; diff --git a/src/test/regress/sql/foreign_key_to_reference_table.sql b/src/test/regress/sql/foreign_key_to_reference_table.sql index c03cca61d..dd89d0c39 100644 --- a/src/test/regress/sql/foreign_key_to_reference_table.sql +++ b/src/test/regress/sql/foreign_key_to_reference_table.sql @@ -97,50 +97,50 @@ SELECT create_reference_table('referenced_table'); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET NULL; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE 
relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET NULL); SELECT create_distributed_table('referencing_table', 'ref_id'); -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; BEGIN; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT); SELECT create_distributed_table('referencing_table', 'ref_id'); COMMIT; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET NULL; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET DEFAULT; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE CASCADE; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; -- check if we can add the foreign key while adding the column CREATE TABLE referencing_table(id int, ref_id int); SELECT 
create_distributed_table('referencing_table', 'ref_id'); ALTER TABLE referencing_table ADD COLUMN referencing int REFERENCES referenced_table(id) ON UPDATE CASCADE; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; -- foreign keys are only supported when the replication factor = 1 @@ -148,21 +148,21 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(id); -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; -- should fail when we add the column as well CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); ALTER TABLE referencing_table ADD COLUMN referencing_col int REFERENCES referenced_table(id) ON DELETE SET NULL; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; SET citus.shard_replication_factor TO 1; -- simple create_distributed_table should work in/out transactions on tables with foreign key to reference tables CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id'); -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; DROP TABLE referenced_table; @@ -172,7 +172,7 @@ BEGIN; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id'); COMMIT; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referencing_table; -- foreign keys are supported either in between distributed tables including the @@ -411,7 +411,7 @@ SELECT create_distributed_table('referencing_table', 'id'); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE 
relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(0,1000) AS f(x); INSERT INTO referenced_table2 SELECT x, x+1 FROM generate_series(500,1500) AS f(x); @@ -442,7 +442,7 @@ SELECT create_reference_table('referenced_table'); SELECT create_reference_table('referenced_table2'); SELECT create_distributed_table('referencing_table', 'id'); -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; \set VERBOSITY terse DROP TABLE referenced_table CASCADE; @@ -468,7 +468,7 @@ BEGIN; ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (ref_id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; COMMIT; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(0,1000) AS f(x); INSERT INTO referenced_table2 SELECT x, x+1 FROM generate_series(500,1500) AS f(x); @@ -503,7 +503,7 @@ BEGIN; ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (ref_id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; COMMIT; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE referenced_table CASCADE; DROP TABLE referenced_table2 CASCADE; @@ -532,7 +532,7 @@ ALTER TABLE referencing_table2 ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFE ALTER TABLE referencing_table2 ADD CONSTRAINT fkey_ref_to_dist FOREIGN KEY (id) REFERENCES referencing_table(id) ON DELETE CASCADE; COMMIT; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(0,1000) AS f(x); -- should fail @@ -560,7 +560,7 @@ DROP TABLE referencing_table2 CASCADE; CREATE TABLE referenced_table(test_column int, test_column2 int UNIQUE, PRIMARY KEY(test_column)); SELECT create_reference_table('referenced_table'); -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; \set VERBOSITY terse DROP TABLE referenced_table CASCADE; @@ -579,7 +579,7 @@ SELECT create_distributed_table('referencing_table', 'id'); SELECT create_distributed_table('referencing_referencing_table', 'id'); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id, ref_id2) REFERENCES referenced_table(test_column, test_column2) ON DELETE CASCADE; -SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid LIKE 
'fkey_reference_table.referencing%'; +SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.referencing%\d{2,}'; INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(1,1000) AS f(x); INSERT INTO referencing_table SELECT x, x+1, x+2 FROM generate_series(1,999) AS f(x); @@ -647,10 +647,10 @@ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) SELECT create_reference_table('test_table_1'); SELECT create_distributed_table('test_table_2', 'id'); -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; ALTER TABLE test_table_2 DROP CONSTRAINT test_table_2_value_1_fkey; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1, test_table_2; @@ -666,7 +666,7 @@ BEGIN; ALTER TABLE test_table_2 DROP CONSTRAINT test_table_2_value_1_fkey; COMMIT; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1, test_table_2; -- check if we can drop the primary key which cascades to the foreign key @@ -677,7 +677,7 @@ SELECT create_reference_table('test_table_1'); SELECT create_distributed_table('test_table_2', 'id'); ALTER TABLE test_table_1 DROP CONSTRAINT test_table_1_pkey CASCADE; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1, test_table_2; -- check if we can drop the primary key which cascades to the foreign key in a transaction block @@ -691,7 +691,7 @@ BEGIN; ALTER TABLE test_table_1 DROP CONSTRAINT test_table_1_pkey CASCADE; COMMIT; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1, test_table_2; -- check if we can drop the column which foreign key is referencing from @@ -702,7 +702,7 @@ SELECT create_reference_table('test_table_1'); SELECT create_distributed_table('test_table_2', 'id'); ALTER TABLE test_table_2 DROP COLUMN value_1; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1, test_table_2; -- check if we can drop the column which foreign key is referencing from in a transaction block @@ -715,7 +715,7 @@ BEGIN; ALTER TABLE test_table_2 DROP COLUMN value_1; COMMIT; -SELECT count(*) FROM table_fkeys_in_workers 
WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1, test_table_2; -- check if we can drop the column which foreign key is referencing to @@ -726,7 +726,7 @@ SELECT create_reference_table('test_table_1'); SELECT create_distributed_table('test_table_2', 'id'); ALTER TABLE test_table_1 DROP COLUMN id CASCADE; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1, test_table_2; -- check if we can drop the column which foreign key is referencing from in a transaction block @@ -739,7 +739,7 @@ BEGIN; ALTER TABLE test_table_1 DROP COLUMN id CASCADE; COMMIT; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1, test_table_2; -- check if we can alter the column type which foreign key is referencing to @@ -759,7 +759,7 @@ INSERT INTO test_table_1 VALUES (2147483648,4); INSERT INTO test_table_2 VALUES (4,2147483648); -- should fail since there is a bigint out of integer range > (2^32 - 1) ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE int; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1 CASCADE; DROP TABLE test_table_2; @@ -775,7 +775,7 @@ BEGIN; ALTER TABLE test_table_1 DROP COLUMN id CASCADE; COMMIT; -SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; +SELECT count(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}'; DROP TABLE test_table_1, test_table_2; -- check if we can TRUNCATE the referenced table diff --git a/src/test/regress/sql/local_shard_utility_command_execution.sql b/src/test/regress/sql/local_shard_utility_command_execution.sql index d754c5888..bebaa5167 100644 --- a/src/test/regress/sql/local_shard_utility_command_execution.sql +++ b/src/test/regress/sql/local_shard_utility_command_execution.sql @@ -7,6 +7,7 @@ \set VERBOSITY terse SET citus.next_shard_id TO 1500000; +SET citus.next_placement_id TO 8300000; SET citus.shard_replication_factor TO 1; SET citus.enable_local_execution TO ON; SET citus.shard_COUNT TO 32; diff --git a/src/test/regress/sql/master_copy_shard_placement.sql b/src/test/regress/sql/master_copy_shard_placement.sql index c5dd7beeb..9f6949e37 100644 --- a/src/test/regress/sql/master_copy_shard_placement.sql +++ b/src/test/regress/sql/master_copy_shard_placement.sql @@ -105,7 +105,5 @@ SELECT master_copy_shard_placement( do_repair := false, transfer_mode := 'block_writes'); -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - SET client_min_messages TO ERROR; DROP SCHEMA mcsp CASCADE; 
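Throughout this diff, the deprecated two-step distribution API (master_create_distributed_table followed by master_create_worker_shards) is replaced by a single create_distributed_table call: the shard count becomes a named argument and the replication factor moves into the citus.shard_replication_factor GUC, matching the researchers/labs, nation_hash, and example hunks above and below. A minimal sketch of the recurring rewrite, using a hypothetical table t(a int) that is not part of any test file in this diff:

-- before: deprecated two-step API
--   SELECT master_create_distributed_table('t', 'a', 'hash');
--   SELECT master_create_worker_shards('t', 4, 2);  -- 4 shards, replication factor 2
-- after: single call; the replication factor is a GUC set beforehand
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('t', 'a', shard_count := 4);
RESET citus.shard_replication_factor;

The RESET afterwards mirrors the convention the updated tests follow, so later statements in the same session fall back to the default replication factor.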
diff --git a/src/test/regress/sql/multi_colocation_utils.sql b/src/test/regress/sql/multi_colocation_utils.sql
index ae19d5bb4..63b122405 100644
--- a/src/test/regress/sql/multi_colocation_utils.sql
+++ b/src/test/regress/sql/multi_colocation_utils.sql
@@ -153,8 +153,8 @@ SELECT find_shard_interval_index(1300016);
 -- check external colocation API
-SELECT count(*) FROM pg_dist_partition WHERE colocationid = 4;
-DELETE FROM pg_dist_colocation WHERE colocationid = 4;
+SELECT count(*) FROM pg_dist_partition WHERE colocationid IN (4, 5);
+DELETE FROM pg_dist_colocation WHERE colocationid IN (4, 5);
 SET citus.shard_count = 2;
diff --git a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql
index 24ddeb2a4..6bfd32ed0 100644
--- a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql
+++ b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql
@@ -9,6 +9,9 @@ SET citus.shard_replication_factor TO 1;
 CREATE SCHEMA fix_idx_names;
 SET search_path TO fix_idx_names, public;
+-- stop metadata sync for one of the worker nodes so we test both cases
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+
 -- NULL input should automatically return NULL since
 -- fix_partition_shard_index_names is strict
 -- same for worker_fix_partition_shard_index_names
@@ -47,7 +50,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 -- the names are generated correctly
 -- shard id has been appended to all index names which didn't end in shard id
 -- this goes in line with Citus's way of naming indexes of shards: always append shardid to the end
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
 \c - - - :master_port
 -- this should work properly
@@ -101,7 +104,6 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 \c - - - :master_port
 SET search_path TO fix_idx_names, public;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 DROP INDEX short;
 DROP TABLE yet_another_partition_table, another_partition_table_with_very_long_name;
@@ -121,7 +123,7 @@ CREATE INDEX ON dist_partitioned_table USING btree (dist_col, partition_col);
 SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
 \c - - - :worker_1_port
 -- index names end in shardid for partitions
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
 \c - - - :master_port
 SET search_path TO fix_idx_names, public;
@@ -151,7 +153,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 \c - - - :worker_1_port
 -- index names are already correct, including inherited index for another_partition
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
 \c - - - :master_port
 SET search_path TO fix_idx_names, public;
@@ -175,7 +177,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 \c - - - :worker_1_port
 -- we have correct names
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
 \c - - - :master_port
 SET search_path TO fix_idx_names, public;
@@ -193,7 +195,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 SELECT fix_partition_shard_index_names('dist_partitioned_table'::regclass);
 \c - - - :worker_1_port
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
 \c - - - :master_port
 SET search_path TO fix_idx_names, public;
@@ -220,7 +222,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' O
 -- index names are correct
 -- shard id has been appended to all index names which didn't end in shard id
 -- this goes in line with Citus's way of naming indexes of shards: always append shardid to the end
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
 \c - - - :master_port
 SET search_path TO fix_idx_names, public;
@@ -266,7 +268,7 @@ DROP INDEX p_another_col_partition_col_idx;
 \c - - - :worker_1_port
 -- check that indexes have been renamed
 -- and that index on p has been dropped (it won't appear)
-SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' ORDER BY 1, 2;
+SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2;
 \c - - - :master_port
 SET search_path TO fix_idx_names, public;
diff --git a/src/test/regress/sql/multi_function_evaluation.sql b/src/test/regress/sql/multi_function_evaluation.sql
index cad63fe9d..b31d6b7a6 100644
--- a/src/test/regress/sql/multi_function_evaluation.sql
+++ b/src/test/regress/sql/multi_function_evaluation.sql
@@ -14,9 +14,10 @@ SET citus.enable_fast_path_router_planner TO false;
 -- nextval() works (no good way to test DEFAULT, or, by extension, SERIAL)
 CREATE TABLE example (key INT, value INT);
-SELECT master_create_distributed_table('example', 'key', 'hash');
+SET citus.shard_replication_factor TO 2;
+SELECT create_distributed_table('example', 'key', shard_count:=1);
+RESET citus.shard_replication_factor;
 CREATE SEQUENCE example_value_seq;
-SELECT master_create_worker_shards('example', 1, 2);
 INSERT INTO example VALUES (1, nextval('example_value_seq'));
 SELECT * FROM example;
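The multi_function_evaluation hunk above (like the multi_modifying_xacts and multi_schema_support hunks further down) applies the same modernization: the deprecated master_create_distributed_table() / master_create_worker_shards() pair collapses into a single create_distributed_table() call, with the replication factor moved from an argument into a GUC. Schematically, for a hypothetical table t:

-- old, deprecated two-step API:
--   SELECT master_create_distributed_table('t', 'key', 'hash');
--   SELECT master_create_worker_shards('t', /* shard_count */ 1, /* replication_factor */ 2);
-- new equivalent, as used in the hunks:
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('t', 'key', shard_count := 1);
RESET citus.shard_replication_factor;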
diff --git a/src/test/regress/sql/multi_index_statements.sql b/src/test/regress/sql/multi_index_statements.sql
index c19384ab3..e6ae776a6 100644
--- a/src/test/regress/sql/multi_index_statements.sql
+++ b/src/test/regress/sql/multi_index_statements.sql
@@ -107,10 +107,10 @@ DROP TABLE local_table;
 -- Verify that all indexes got created on the master node and one of the workers
 SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname;
 \c - - - :worker_1_port
-SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1);
-SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
-SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%';
-SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
+SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_%' ORDER BY relname LIMIT 1);
+SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash_%';
+SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range_%';
+SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append_%';
 \c - - - :master_port
 SET search_path TO multi_index_statements, public;
@@ -335,7 +335,7 @@ SELECT count(*) FROM pg_inherits WHERE inhrelid::regclass::text = 'child_index'
 SET search_path TO multi_index_statements;
 -- show that child indices of partition shards also inherit from parent indices of parent shards
-SELECT count(*) FROM pg_inherits WHERE inhrelid::regclass::text LIKE 'child_index%' AND inhparent::regclass::text LIKE 'parent_index%';
+SELECT count(*) FROM pg_inherits WHERE inhrelid::regclass::text LIKE 'child_index\_%' AND inhparent::regclass::text LIKE 'parent_index\_%';
 \c - - - :master_port
 SET search_path TO multi_index_statements;
diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql
index 07b7148a5..d49f817d7 100644
--- a/src/test/regress/sql/multi_metadata_sync.sql
+++ b/src/test/regress/sql/multi_metadata_sync.sql
@@ -108,9 +108,9 @@ SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND node
 \c - - - :worker_1_port
 SELECT * FROM pg_dist_local_group;
 SELECT * FROM pg_dist_node ORDER BY nodeid;
-SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
-SELECT * FROM pg_dist_shard ORDER BY shardid;
-SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
+SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
+SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport;
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
 SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass;
@@ -158,9 +158,9 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 \c - - - :worker_1_port
 SELECT * FROM pg_dist_local_group;
 SELECT * FROM pg_dist_node ORDER BY nodeid;
-SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
-SELECT * FROM pg_dist_shard ORDER BY shardid;
-SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
+SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
+SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport;
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
 SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass;
@@ -294,8 +294,8 @@ ORDER BY
 \d mx_test_schema_1.mx_table_1
 \d mx_test_schema_2.mx_table_2
-SELECT * FROM pg_dist_partition;
-SELECT * FROM pg_dist_shard;
+SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_test_schema%';
+SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_test_schema%';
 SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
 -- Check that CREATE INDEX statement is propagated
diff --git a/src/test/regress/sql/multi_modifying_xacts.sql b/src/test/regress/sql/multi_modifying_xacts.sql
index 4caac28ba..a3b004546 100644
--- a/src/test/regress/sql/multi_modifying_xacts.sql
+++ b/src/test/regress/sql/multi_modifying_xacts.sql
@@ -16,11 +16,13 @@ CREATE TABLE labs (
 name text NOT NULL
 );
-SELECT master_create_distributed_table('researchers', 'lab_id', 'hash');
-SELECT master_create_worker_shards('researchers', 2, 2);
+SET citus.shard_replication_factor TO 2;
+SELECT create_distributed_table('researchers', 'lab_id', shard_count:=2);
-SELECT master_create_distributed_table('labs', 'id', 'hash');
-SELECT master_create_worker_shards('labs', 1, 1);
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('labs', 'id', shard_count:=1);
+
+RESET citus.shard_replication_factor;
 -- might be confusing to have two people in the same lab with the same name
 CREATE UNIQUE INDEX avoid_name_confusion_idx ON researchers (lab_id, name);
@@ -920,6 +922,7 @@ SELECT create_distributed_table('numbers_hash_failure_test', 'key');
 -- ensure that the shard is created for this user
 \c - test_user - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt reference_failure_test_1200015
 -- now connect with the default user,
@@ -939,8 +942,6 @@ COMMIT;
 BEGIN;
 COPY reference_failure_test FROM STDIN WITH (FORMAT 'csv');
-2,2
-\.
 COMMIT;
 -- show that no data go through the table and shard states are good
@@ -961,9 +962,6 @@ ORDER BY s.logicalrelid, sp.shardstate;
 -- any failure rollbacks the transaction
 BEGIN;
 COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv');
-1,1
-2,2
-\.
 ABORT;
 -- none of placements are invalid after abort
@@ -984,9 +982,6 @@ ORDER BY shardid, nodeport;
 -- all failures roll back the transaction
 BEGIN;
 COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv');
-1,1
-2,2
-\.
 COMMIT;
 -- expect none of the placements to be market invalid after commit
diff --git a/src/test/regress/sql/multi_multiuser.sql b/src/test/regress/sql/multi_multiuser.sql
index 0c2a3fdb3..204b7360d 100644
--- a/src/test/regress/sql/multi_multiuser.sql
+++ b/src/test/regress/sql/multi_multiuser.sql
@@ -342,11 +342,6 @@ SELECT create_distributed_function('usage_access_func_third(int,int[])', '$1', c
 SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third';
 SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third'$$);
--- we don't want other tests to have metadata synced
--- that might change the test outputs, so we're just trying to be careful
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-
 RESET ROLE;
 -- now we distribute the table as super user
 SELECT create_distributed_table('full_access_user_schema.t1', 'id');
diff --git a/src/test/regress/sql/multi_partitioning.sql b/src/test/regress/sql/multi_partitioning.sql
index eaf614fae..d95413dc9 100644
--- a/src/test/regress/sql/multi_partitioning.sql
+++ b/src/test/regress/sql/multi_partitioning.sql
@@ -1969,6 +1969,7 @@ CALL drop_old_time_partitions('non_partitioned_table', now());
 DROP TABLE non_partitioned_table;
 -- https://github.com/citusdata/citus/issues/4962
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 361168;
 CREATE TABLE part_table_with_very_long_name (
@@ -1999,7 +2000,6 @@ SELECT tablename, indexname FROM pg_indexes
 WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%' ORDER BY 1, 2;
 \c - - - :master_port
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 DROP SCHEMA partitioning_schema CASCADE;
 RESET search_path;
 DROP TABLE IF EXISTS
diff --git a/src/test/regress/sql/multi_reference_table.sql b/src/test/regress/sql/multi_reference_table.sql
index f37b17ba0..d82e57e6b 100644
--- a/src/test/regress/sql/multi_reference_table.sql
+++ b/src/test/regress/sql/multi_reference_table.sql
@@ -888,6 +888,7 @@ DROP INDEX reference_schema.reference_index_2;
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl_1250019'::regclass;
 \di reference_schema.reference_index_2*
 \c - - - :master_port
+SET citus.next_shard_id TO 1255000;
 -- now test the renaming of the table, and back to the expected name
 ALTER TABLE reference_schema.reference_table_ddl RENAME TO reference_table_ddl_test;
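The multi_metadata_sync edits above address the same root cause as several other hunks: once a test leaves metadata synced to a worker, unscoped catalog dumps on that worker pick up objects from other tests. Scoping each probe to the test's own schema keeps the expected output stable. The shape of the scoped query, taken from the hunks above:

-- only this test's shards (schema name is the one used in the hunks):
SELECT * FROM pg_dist_shard_placement
WHERE shardid IN (SELECT shardid FROM pg_dist_shard
                  WHERE logicalrelid::text LIKE 'mx_testing_schema%')
ORDER BY shardid, nodename, nodeport;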
stop_metadata_sync_to_node('localhost', :worker_1_port);
-
 -- reload pg_dist_shard_placement table
 INSERT INTO pg_dist_shard_placement (SELECT * FROM tmp_shard_placement);
 DROP TABLE tmp_shard_placement;
diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql
index c3866c843..b472f0083 100644
--- a/src/test/regress/sql/multi_schema_support.sql
+++ b/src/test/regress/sql/multi_schema_support.sql
@@ -130,8 +130,11 @@ CREATE TABLE test_schema_support.nation_hash(
 n_regionkey integer not null,
 n_comment varchar(152)
 );
-SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash');
-SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 2);
+
+SET citus.shard_replication_factor TO 2;
+SELECT create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', shard_count:=4);
+
+RESET citus.shard_replication_factor;
 -- test cursors
@@ -787,6 +790,7 @@ SELECT * FROM new_schema.table_set_schema;
 DROP SCHEMA new_schema CASCADE;
+SET citus.next_shard_id TO 1195000;
 -- test ALTER TABLE SET SCHEMA when a search path is set
 CREATE SCHEMA old_schema;
@@ -822,6 +826,8 @@ SELECT table_schema AS "Shards' Schema", COUNT(*) AS "Counts"
 \c - - - :master_port
 SELECT * FROM new_schema.table_set_schema;
+SET citus.next_shard_id TO 1196000;
+
 SET search_path to public;
 DROP SCHEMA old_schema CASCADE;
 DROP SCHEMA new_schema CASCADE;
@@ -892,6 +898,8 @@ SELECT create_reference_table('schema_with_user.test_table');
 \c - - - :master_port
+SET citus.next_shard_id TO 1197000;
+
 -- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock
 DROP OWNED BY "test-user" CASCADE;
 SELECT run_command_on_workers('DROP OWNED BY "test-user" CASCADE');
@@ -1017,9 +1025,11 @@ BEGIN;
 ROLLBACK;
 -- Clean up the created schema
+SET client_min_messages TO WARNING;
 DROP SCHEMA run_test_schema CASCADE;
 DROP SCHEMA test_schema_support_join_1 CASCADE;
 DROP SCHEMA test_schema_support_join_2 CASCADE;
 DROP SCHEMA "Citus'Teen123" CASCADE;
 DROP SCHEMA "CiTUS.TEEN2" CASCADE;
 DROP SCHEMA bar CASCADE;
+DROP SCHEMA test_schema_support CASCADE;
diff --git a/src/test/regress/sql/multi_transactional_drop_shards.sql b/src/test/regress/sql/multi_transactional_drop_shards.sql
index edbce4f13..f88839d77 100644
--- a/src/test/regress/sql/multi_transactional_drop_shards.sql
+++ b/src/test/regress/sql/multi_transactional_drop_shards.sql
@@ -5,6 +5,7 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1410000;
+ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 4230000;
 SET citus.shard_count TO 4;
 SET client_min_messages TO WARNING;
@@ -33,6 +34,7 @@ ORDER BY
 -- verify shards are not dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_shards_*
 \c - - - :master_port
@@ -58,6 +60,7 @@ ORDER BY
 -- verify shards are dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_shards_*
 \c - - - :master_port
@@ -84,6 +87,7 @@ ORDER BY
 -- verify shards are not dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_shards_*
 \c - - - :master_port
@@ -106,6 +110,7 @@ ORDER BY
 -- verify shards are dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_shards_*
 \c - - - :master_port
@@ -134,6 +139,7 @@ ORDER BY
 -- verify shards are not dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_shards_*
 \c - - - :master_port
@@ -157,6 +163,7 @@ ORDER BY
 -- verify shards are not dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_shards_*
@@ -191,6 +198,7 @@ ORDER BY
 -- verify shards are not dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_shards_*
 \c - - - :master_port
@@ -219,7 +227,8 @@ ORDER BY
 -- verify shards are not dropped
 \c - - - :worker_1_port
-\dt transactional_drop_reference*
+SET citus.override_table_visibility TO false;
+\dt transactional_drop_reference_*
 \c - - - :master_port
@@ -241,6 +250,7 @@ ORDER BY
 -- verify shards are not dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_shards_*
 DROP EVENT TRIGGER fail_drop_table;
 \c - - - :master_port
@@ -272,6 +282,7 @@ ORDER BY
 -- verify shards and sequence are not dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_serial_*
 \ds transactional_drop_serial_column2_seq
 \c - - - :master_port
@@ -298,6 +309,7 @@ ORDER BY
 -- verify shards and sequence are dropped
 \c - - - :worker_1_port
+SET citus.override_table_visibility TO false;
 \dt transactional_drop_serial_*
 \ds transactional_drop_serial_column2_seq
 \c - - - :master_port
@@ -366,7 +378,7 @@ ORDER BY
 SET client_min_messages TO WARNING;
 -- try using the coordinator as a worker and then dropping the table
-SELECT 1 FROM master_add_node('localhost', :master_port);
+SELECT 1 FROM master_add_node('localhost', :master_port, groupid := 0);
 CREATE TABLE citus_local (id serial, k int);
 SELECT create_distributed_table('citus_local', 'id');
 INSERT INTO citus_local (k) VALUES (2);
@@ -375,7 +387,6 @@ SELECT master_remove_node('localhost', :master_port);
 -- clean the workspace
 DROP TABLE transactional_drop_shards, transactional_drop_reference;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 -- test DROP TABLE as a non-superuser in a transaction block
 CREATE USER try_drop_table WITH LOGIN;
diff --git a/src/test/regress/sql/multi_unsupported_worker_operations.sql b/src/test/regress/sql/multi_unsupported_worker_operations.sql
index a59337200..1fbc9aa82 100644
--- a/src/test/regress/sql/multi_unsupported_worker_operations.sql
+++ b/src/test/regress/sql/multi_unsupported_worker_operations.sql
@@ -166,8 +166,8 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
 SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
 SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
 \c - - - :worker_2_port
-SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition;
-SELECT count(*) FROM pg_dist_partition;
+SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%';
+SELECT count(*) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%';
 SELECT count(*) FROM pg_dist_node;
 \c - - - :worker_1_port
@@ -229,7 +229,7 @@ DROP TABLE mx_table_2;
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 \c - - - :worker_1_port
 DELETE FROM pg_dist_node;
-SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition;
+SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%';
 \c - - - :master_port
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
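The repeated SET citus.override_table_visibility TO false; additions in multi_transactional_drop_shards exist because Citus hides shard relations from psql's \d meta-commands by default; turning the filter off lets \dt actually list the shards the test wants to verify. A short illustration (the shard name shown is illustrative, derived from the sequence restart value above):

SET citus.override_table_visibility TO false;
\dt transactional_drop_shards_*
-- with the filter off, shard relations such as
-- transactional_drop_shards_1410000 now appear in the listing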
diff --git a/src/test/regress/sql/mx_regular_user.sql b/src/test/regress/sql/mx_regular_user.sql
index f533406cb..ed8c50b07 100644
--- a/src/test/regress/sql/mx_regular_user.sql
+++ b/src/test/regress/sql/mx_regular_user.sql
@@ -19,6 +19,7 @@ GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;
 CREATE SCHEMA "Mx Super User";
 SELECT 1 FROM run_command_on_workers($$CREATE SCHEMA "Mx Super User";$$);
 SET citus.next_shard_id TO 2980000;
+SET citus.next_placement_id TO 2980000;
 SET search_path TO "Mx Super User";
 CREATE TABLE super_user_owned_regular_user_granted (a int PRIMARY KEY, b int);
 SELECT create_reference_table ('"Mx Super User".super_user_owned_regular_user_granted');
@@ -118,6 +119,7 @@ GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;
 \c - regular_mx_user - :master_port
 SET search_path TO "Mx Regular User";
 SET citus.next_shard_id TO 1560000;
+SET citus.next_placement_id TO 1560000;
 -- make sure that we sync the metadata
 SET citus.shard_replication_factor TO 1;
diff --git a/src/test/regress/sql/node_conninfo_reload.sql b/src/test/regress/sql/node_conninfo_reload.sql
index cd76399b5..5a4f3c75b 100644
--- a/src/test/regress/sql/node_conninfo_reload.sql
+++ b/src/test/regress/sql/node_conninfo_reload.sql
@@ -4,6 +4,7 @@ SET search_path TO node_conninfo_reload;
 SET citus.shard_count TO 4;
 SET citus.shard_replication_factor TO 1;
 SET citus.force_max_query_parallelization TO ON;
+SET citus.next_shard_id TO 278000;
 create table test(a int);
 select create_distributed_table('test', 'a');
diff --git a/src/test/regress/sql/partitioning_issue_3970.sql b/src/test/regress/sql/partitioning_issue_3970.sql
index 13190820c..b95f2a2ad 100644
--- a/src/test/regress/sql/partitioning_issue_3970.sql
+++ b/src/test/regress/sql/partitioning_issue_3970.sql
@@ -46,7 +46,7 @@ ORDER BY 1,2,3;
 \c - - - :worker_1_port
 SELECT relname, conname, pg_catalog.pg_get_constraintdef(con.oid, true)
 FROM pg_constraint con JOIN pg_class rel ON (rel.oid=con.conrelid)
-WHERE relname LIKE 'part_table%'
+WHERE relname SIMILAR TO 'part_table%\_\d%'
 ORDER BY 1,2,3;
 \c - - - :master_port
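More generally, the SET citus.next_shard_id / SET citus.next_placement_id lines added throughout pin the id sequences so that shard names and placement ids in the expected output no longer depend on how many ids earlier tests consumed. A minimal sketch, reusing the values from local_shard_utility_command_execution above (table t is hypothetical):

SET citus.next_shard_id TO 1500000;
SET citus.next_placement_id TO 8300000;
CREATE TABLE t (a int);
SELECT create_distributed_table('t', 'a');
-- shards are now named t_1500000, t_1500001, ... and their placements
-- receive ids starting at 8300000, independent of test ordering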