From 71c686f3898ed8ee0f35dbce379cbc6affae04e1 Mon Sep 17 00:00:00 2001
From: Sameer Awasekar
Date: Wed, 3 Aug 2022 12:55:25 +0530
Subject: [PATCH] Added cleanup test

---
 ...citus_non_blocking_split_shard_cleanup.out | 125 ++++++++++++++++++
 src/test/regress/split_schedule               |   1 +
 ...itus_non_blocking_split_shard_cleanup.sql} |  53 +++++---
 3 files changed, 160 insertions(+), 19 deletions(-)
 create mode 100644 src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out
 rename src/test/regress/sql/{citus_non_blocking_shard_split_cleanup.sql => citus_non_blocking_split_shard_cleanup.sql} (60%)

diff --git a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out
new file mode 100644
index 000000000..ee60bfe8a
--- /dev/null
+++ b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out
@@ -0,0 +1,125 @@
+/*
+Citus non-blocking shard split test.
+The test ensures that after an error-free non-blocking split operation, the following objects
+created for logical replication are cleaned up:
+1) At source node: Publications, replication slots, dummy shards.
+2) At destination node: Subscriptions, dummy shards.
+*/
+CREATE SCHEMA "citus_split_test_schema";
+CREATE ROLE test_split_role WITH LOGIN;
+ERROR:  role "test_split_role" already exists
+GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema" TO test_split_role;
+SET ROLE test_split_role;
+SET search_path TO "citus_split_test_schema";
+SET citus.next_shard_id TO 8981000;
+SET citus.next_placement_id TO 8610000;
+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 1;
+-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc.
+CREATE TABLE sensors(
+    measureid integer,
+    eventdatetime date,
+    measure_data jsonb,
+    meaure_quantity decimal(15, 2),
+    measure_status char(1),
+    measure_comment varchar(44),
+    PRIMARY KEY (measureid, eventdatetime, measure_data));
+SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
+-- BEGIN : Move one shard before we split it.
+\c - postgres - :master_port
+SET ROLE test_split_role;
+SET search_path TO "citus_split_test_schema";
+SET citus.next_shard_id TO 8981007;
+SET citus.defer_drop_after_shard_move TO OFF;
+-- BEGIN : Set node id variables
+SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
+SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
+-- END : Set node id variables
+-- BEGIN : Split two shards
+-- Perform 2 way split
+SELECT pg_catalog.citus_split_shard_by_split_points(
+    8981000,
+    ARRAY['-1073741824'],
+    ARRAY[:worker_2_node, :worker_2_node],
+    'force_logical');
+ citus_split_shard_by_split_points
+---------------------------------------------------------------------
+
+(1 row)
+
+\c - - - :worker_1_port
+SET search_path TO "citus_split_test_schema";
+SET citus.show_shards_for_app_name_prefixes = '*';
+-- Dummy shards should be cleaned up. 8981007, 8981008 are dummy shards
+-- created at source.
+SELECT count(*) FROM pg_class where relname like '%sensors_8981007%';
+ count
+---------------------------------------------------------------------
+     0
+(1 row)
+
+SELECT count(*) FROM pg_class where relname like '%sensors_8981008%';
+ count
+---------------------------------------------------------------------
+     0
+(1 row)
+
+-- Replication slots should be cleaned up at source
+SELECT slot_name FROM pg_replication_slots;
+ slot_name
+---------------------------------------------------------------------
+(0 rows)
+
+-- Publications should be cleaned up on worker1
+SELECT * FROM pg_publication;
+ oid | pubname | pubowner | puballtables | pubinsert | pubupdate | pubdelete | pubtruncate | pubviaroot
+---------------------------------------------------------------------
+(0 rows)
+
+\c - - - :worker_2_port
+SET search_path TO "citus_split_test_schema";
+-- All subscriptions should be cleaned up.
+SELECT * FROM pg_subscription;
+ oid | subdbid | subname | subowner | subenabled | subbinary | substream | subconninfo | subslotname | subsynccommit | subpublications
+---------------------------------------------------------------------
+(0 rows)
+
+-- Trigger a 3-way local split.
+\c - - - :master_port
+SET search_path TO "citus_split_test_schema";
+SELECT pg_catalog.citus_split_shard_by_split_points(
+    8981001,
+    ARRAY['536870911', '1610612735'],
+    ARRAY[:worker_2_node, :worker_2_node, :worker_2_node],
+    'force_logical');
+ citus_split_shard_by_split_points
+---------------------------------------------------------------------
+
+(1 row)
+
+\c - - - :worker_2_port
+SET search_path TO "citus_split_test_schema";
+-- Replication slots should be cleaned up
+SELECT slot_name FROM pg_replication_slots;
+ slot_name
+---------------------------------------------------------------------
+(0 rows)
+
+-- Publications should be cleaned up
+SELECT * FROM pg_publication;
+ oid | pubname | pubowner | puballtables | pubinsert | pubupdate | pubdelete | pubtruncate | pubviaroot
+---------------------------------------------------------------------
+(0 rows)
+
+-- All subscriptions should be cleaned up.
+SELECT * FROM pg_subscription;
+ oid | subdbid | subname | subowner | subenabled | subbinary | substream | subconninfo | subslotname | subsynccommit | subpublications
+---------------------------------------------------------------------
+(0 rows)
+
diff --git a/src/test/regress/split_schedule b/src/test/regress/split_schedule
index 5596e677c..58e2ee954 100644
--- a/src/test/regress/split_schedule
+++ b/src/test/regress/split_schedule
@@ -22,3 +22,4 @@ test: citus_split_shard_by_split_points_failure
 test: citus_split_shard_columnar_partitioned
 test: citus_non_blocking_split_shards
 test: citus_non_blocking_split_shard_partitioned
+test: citus_non_blocking_split_shard_cleanup
\ No newline at end of file
diff --git a/src/test/regress/sql/citus_non_blocking_shard_split_cleanup.sql b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql
similarity index 60%
rename from src/test/regress/sql/citus_non_blocking_shard_split_cleanup.sql
rename to src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql
index e122221cd..79d12981e 100644
--- a/src/test/regress/sql/citus_non_blocking_shard_split_cleanup.sql
+++ b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql
@@ -1,14 +1,9 @@
 /*
-Citus Shard Split Test.The test is model similar to 'shard_move_constraints'.
-Here is a high level overview of test plan:
- 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table.
- 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors.
- 3. Create Foreign key constraints between the two co-located distributed tables.
- 4. Load data into the three tables.
- 5. Move one of the shards for 'sensors' to test ShardMove -> Split.
- 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
- 7. Move one of the split shard to test Split -> ShardMove.
- 8. Split an already split shard second time on a different schema.
+Citus non-blocking shard split test.
+The test ensures that after an error-free non-blocking split operation, the following objects
+created for logical replication are cleaned up:
+1) At source node: Publications, replication slots, dummy shards.
+2) At destination node: Subscriptions, dummy shards.
 */
 CREATE SCHEMA "citus_split_test_schema";
@@ -35,7 +30,6 @@ CREATE TABLE sensors(
 SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
 INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
--- END: Create table to split, along with other co-located tables. Add indexes, statistics etc.
 -- BEGIN : Move one shard before we split it.
@@ -45,34 +39,55 @@ SET search_path TO "citus_split_test_schema";
 SET citus.next_shard_id TO 8981007;
 SET citus.defer_drop_after_shard_move TO OFF;
-SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
--- END : Move one shard before we split it.
-
 -- BEGIN : Set node id variables
 SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
 SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
 -- END : Set node id variables
--- BEGIN : Split two shards : One with move and One without move.
+-- BEGIN : Split two shards
 -- Perform 2 way split
-SELECT * FROM citus_shards WHERE nodeport IN (:worker_1_port, :worker_2_port);
 SELECT pg_catalog.citus_split_shard_by_split_points(
     8981000,
     ARRAY['-1073741824'],
     ARRAY[:worker_2_node, :worker_2_node],
     'force_logical');
-SELECT * FROM citus_shards WHERE nodeport IN (:worker_1_port, :worker_2_port);
-\c - - - :worker_2_port
+
+\c - - - :worker_1_port
+SET search_path TO "citus_split_test_schema";
+SET citus.show_shards_for_app_name_prefixes = '*';
+
+-- Dummy shards should be cleaned up. 8981007, 8981008 are dummy shards
+-- created at source.
+SELECT count(*) FROM pg_class where relname like '%sensors_8981007%';
+SELECT count(*) FROM pg_class where relname like '%sensors_8981008%';
+
+-- Replication slots should be cleaned up at source
 SELECT slot_name FROM pg_replication_slots;
+-- Publications should be cleaned up on worker1
+SELECT * FROM pg_publication;
+
+\c - - - :worker_2_port
+SET search_path TO "citus_split_test_schema";
+-- All subscriptions should be cleaned up.
+SELECT * FROM pg_subscription;
+
+-- Trigger a 3-way local split.
 \c - - - :master_port
+SET search_path TO "citus_split_test_schema";
 SELECT pg_catalog.citus_split_shard_by_split_points(
     8981001,
     ARRAY['536870911', '1610612735'],
     ARRAY[:worker_2_node, :worker_2_node, :worker_2_node],
     'force_logical');
-SELECT * FROM citus_shards WHERE nodeport IN (:worker_1_port, :worker_2_port);
 \c - - - :worker_2_port
+SET search_path TO "citus_split_test_schema";
+-- Replication slots should be cleaned up
 SELECT slot_name FROM pg_replication_slots;
+
+-- Publications should be cleaned up
+SELECT * FROM pg_publication;
+-- All subscriptions should be cleaned up.
+SELECT * FROM pg_subscription;
\ No newline at end of file