mirror of https://github.com/citusdata/citus.git

Fix split schedule

parent 497a7589d0
commit acf3539a90

@@ -73,6 +73,8 @@ elif "isolation" in test_schedule:
     test_schedule = 'base_isolation_schedule'
 elif "failure" in test_schedule:
     test_schedule = 'failure_base_schedule'
+elif "split" in test_schedule:
+    test_schedule = 'minimal_schedule'
 elif "mx" in test_schedule:
     if use_base_schedule:
         test_schedule = 'mx_base_schedule'

@@ -1,10 +0,0 @@
-# Split Shard tests.
-# Include tests from 'minimal_schedule' for setup.
-test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers
-test: multi_cluster_management
-test: multi_test_catalog_views
-test: tablespace
-# Split tests go here.
-test: citus_split_shard_by_split_points_negative
-test: citus_split_shard_by_split_points
-test: citus_split_shard_by_split_points_deferred_drop

@@ -1,123 +0,0 @@
-/*
-Citus Shard Split Test.The test is model similar to 'shard_move_constraints'.
-Here is a high level overview of test plan:
- 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table.
- 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors.
- 3. Create Foreign key constraints between the two co-located distributed tables.
- 4. Load data into the three tables.
- 5. Move one of the shards for 'sensors' to test ShardMove -> Split.
- 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
- 7. Move one of the split shard to test Split -> ShardMove.
- 8. Split an already split shard second time on a different schema.
-*/
-CREATE SCHEMA "citus_split_test_schema";
-CREATE ROLE test_split_role WITH LOGIN;
-GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema" TO test_split_role;
-SET ROLE test_split_role;
-SET search_path TO "citus_split_test_schema";
-SET citus.next_shard_id TO 8981000;
-SET citus.next_placement_id TO 8610000;
-SET citus.shard_count TO 2;
-SET citus.shard_replication_factor TO 1;
--- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc.
-CREATE TABLE sensors(
-    measureid integer,
-    eventdatetime date,
-    measure_data jsonb,
-    meaure_quantity decimal(15, 2),
-    measure_status char(1),
-    measure_comment varchar(44),
-    PRIMARY KEY (measureid, eventdatetime, measure_data));
-SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
--- END: Create table to split, along with other co-located tables. Add indexes, statistics etc.
--- BEGIN : Move one shard before we split it.
-\c - postgres - :master_port
-SET ROLE test_split_role;
-SET search_path TO "citus_split_test_schema";
-SET citus.next_shard_id TO 8981007;
-SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
- citus_move_shard_placement
----------------------------------------------------------------------
-
-(1 row)
-
--- END : Move one shard before we split it.
--- BEGIN : Set node id variables
-SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
-SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
--- END : Set node id variables
--- BEGIN : Split two shards : One with move and One without move.
--- Perform 2 way split
-SELECT * FROM citus_shards WHERE nodeport IN (:worker_1_port, :worker_2_port);
- table_name | shardid | shard_name | citus_table_type | colocation_id | nodename | nodeport | shard_size
----------------------------------------------------------------------
- sensors | 8981000 | citus_split_test_schema.sensors_8981000 | distributed | 1390009 | localhost | 57638 | 40960
- sensors | 8981001 | citus_split_test_schema.sensors_8981001 | distributed | 1390009 | localhost | 57638 | 40960
-(2 rows)
-
-SELECT pg_catalog.citus_split_shard_by_split_points(
-    8981000,
-    ARRAY['-1073741824'],
-    ARRAY[:worker_2_node, :worker_2_node],
-    'force_logical');
-WARNING: replication slot "citus_shard_split_template_slot_8981000" does not exist
-CONTEXT: while executing command on localhost:xxxxx
- citus_split_shard_by_split_points
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT * FROM citus_shards WHERE nodeport IN (:worker_1_port, :worker_2_port);
- table_name | shardid | shard_name | citus_table_type | colocation_id | nodename | nodeport | shard_size
----------------------------------------------------------------------
- sensors | 8981001 | citus_split_test_schema.sensors_8981001 | distributed | 1390009 | localhost | 57638 | 40960
- sensors | 8981007 | citus_split_test_schema.sensors_8981007 | distributed | 1390009 | localhost | 57638 | 24576
- sensors | 8981008 | citus_split_test_schema.sensors_8981008 | distributed | 1390009 | localhost | 57638 | 24576
-(3 rows)
-
-\c - - - :worker_2_port
-SELECT slot_name FROM pg_replication_slots;
- slot_name
----------------------------------------------------------------------
- citus_shard_split_template_slot_8981000
- citus_shard_split_18_20648
-(2 rows)
-
-\c - - - :master_port
-SELECT pg_catalog.citus_split_shard_by_split_points(
-    8981001,
-    ARRAY['536870911', '1610612735'],
-    ARRAY[:worker_1_node, :worker_1_node, :worker_2_node],
-    'force_logical');
-WARNING: replication slot "citus_shard_split_template_slot_8981001" does not exist
-CONTEXT: while executing command on localhost:xxxxx
- citus_split_shard_by_split_points
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT * FROM citus_shards WHERE nodeport IN (:worker_1_port, :worker_2_port);
- table_name | shardid | shard_name | citus_table_type | colocation_id | nodename | nodeport | shard_size
----------------------------------------------------------------------
- citus_split_test_schema.sensors | 102042 | citus_split_test_schema.sensors_102042 | distributed | 1390009 | localhost | 57637 | 8192
- citus_split_test_schema.sensors | 102043 | citus_split_test_schema.sensors_102043 | distributed | 1390009 | localhost | 57637 | 16384
- citus_split_test_schema.sensors | 102044 | citus_split_test_schema.sensors_102044 | distributed | 1390009 | localhost | 57638 | 16384
- citus_split_test_schema.sensors | 8981007 | citus_split_test_schema.sensors_8981007 | distributed | 1390009 | localhost | 57638 | 24576
- citus_split_test_schema.sensors | 8981008 | citus_split_test_schema.sensors_8981008 | distributed | 1390009 | localhost | 57638 | 24576
-(5 rows)
-
-\c - - - :worker_2_port
-SELECT slot_name FROM pg_replication_slots;
- slot_name
----------------------------------------------------------------------
- citus_shard_split_template_slot_8981001
- citus_shard_split_16_20648
- citus_shard_split_18_20648
-(3 rows)
-
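
For readers of the hunk above: citus_split_shard_by_split_points takes the shard to split, an array of hash-range split points, and one target node per resulting shard, so N split points produce N+1 new shards, with each split point read as the inclusive upper bound of the preceding child range. A minimal sketch of the 2-way split used in this test; the range boundaries are inferred from the worker_split_shard_replication_setup rows elsewhere in this diff, not stated by the function itself:

-- With citus.shard_count = 2, shard 8981000 covers the lower half of the hash space.
SELECT pg_catalog.citus_split_shard_by_split_points(
    8981000,                                 -- shard to split
    ARRAY['-1073741824'],                    -- split point: inclusive end of the first child range
    ARRAY[:worker_2_node, :worker_2_node],   -- one target node per child shard
    'force_logical');                        -- shard_transfer_mode, as used in the test
-- Expected children: hash ranges [-2147483648, -1073741824] and [-1073741823, -1].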

@@ -498,4 +498,5 @@ DETAIL: drop cascades to table citus_split_test_schema.sensors
 drop cascades to table citus_split_test_schema.reference_table
 drop cascades to table citus_split_test_schema.colocated_dist_table
 drop cascades to table citus_split_test_schema.table_with_index_rep_identity
+DROP USER test_split_role;
 --END : Cleanup

@@ -13,9 +13,9 @@ SELECT * from pg_dist_cleanup;
 ---------------------------------------------------------------------
 (0 rows)
 
--- Set a very long(10mins) time interval to stop auto cleanup in case of deferred drop.
+-- Disable Deferred drop auto cleanup to avoid flaky tests.
 \c - postgres - :master_port
-ALTER SYSTEM SET citus.defer_shard_delete_interval TO 600000;
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
 SELECT pg_reload_conf();
  pg_reload_conf
 ---------------------------------------------------------------------
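
The GUC change above is the heart of the fix: setting citus.defer_shard_delete_interval to -1 switches the background deferred-drop cleanup off, and the later hunks replace the pg_sleep-based wait with an explicit cleanup call. A minimal sketch of that pattern, using only objects that appear elsewhere in this diff:

ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;  -- stop background cleanup
SELECT pg_reload_conf();
-- ... perform the split; replaced placements are recorded in pg_dist_cleanup ...
SELECT * FROM pg_dist_cleanup;             -- deferred-drop records carry policy_type = 2
CALL citus_cleanup_orphaned_resources();   -- drop them on demand, no timing dependency
ALTER SYSTEM RESET citus.defer_shard_delete_interval;
SELECT pg_reload_conf();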

@@ -28,8 +28,9 @@ SET citus.next_placement_id TO 8610000;
 SET citus.shard_count TO 2;
 SET citus.shard_replication_factor TO 1;
 SET citus.next_operation_id TO 777;
-SET citus.next_cleanup_record_id TO 11;
+SET citus.next_cleanup_record_id TO 511;
 SET ROLE test_split_role;
+SET search_path TO "citus_split_shard_by_split_points_deferred_schema";
 CREATE TABLE table_to_split(id int PRIMARY KEY, int_data int, data text);
 SELECT create_distributed_table('table_to_split', 'id');
  create_distributed_table

@@ -61,26 +62,25 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 
 (1 row)
 
--- The original shards are marked for deferred drop with policy_type = 2.
+-- The original shard is marked for deferred drop with policy_type = 2.
+-- The previous shard should be dropped at the beginning of the second split call
 SELECT * from pg_dist_cleanup;
  record_id | operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
- 11 | 777 | 1 | public.table_to_split_8981000 | 14 | 2
- 12 | 778 | 1 | public.table_to_split_8981001 | 16 | 2
-(2 rows)
+ 512 | 778 | 1 | citus_split_shard_by_split_points_deferred_schema.table_to_split_8981001 | 16 | 2
+(1 row)
 
--- The physical shards should not be deleted.
+-- One of the physical shards should not be deleted, the other one should.
 \c - - - :worker_1_port
-SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r';
+SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' ORDER BY relname;
  relname
 ---------------------------------------------------------------------
- table_to_split_8981000
 table_to_split_9999000
 table_to_split_9999002
-(3 rows)
+(2 rows)
 
 \c - - - :worker_2_port
-SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r';
+SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' ORDER BY relname;
  relname
 ---------------------------------------------------------------------
 table_to_split_8981001

@@ -88,30 +88,19 @@ SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind
 table_to_split_9999003
 (3 rows)
 
--- Set a very short(1ms) time interval to force deferred drop cleanup.
+-- Perform deferred drop cleanup.
 \c - postgres - :master_port
-ALTER SYSTEM SET citus.defer_shard_delete_interval TO 1;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
--- Give enough time for the deferred drop cleanup to run.
-SELECT pg_sleep(2);
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
+CALL citus_cleanup_orphaned_resources();
+NOTICE: cleaned up 1 orphaned resources
 -- Clenaup has been done.
 SELECT * from pg_dist_cleanup;
  record_id | operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
 (0 rows)
 
+-- Verify that the shard to be dropped is dropped
 \c - - - :worker_1_port
-SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r';
+SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' ORDER BY relname;
  relname
 ---------------------------------------------------------------------
 table_to_split_9999000

@@ -119,7 +108,7 @@ SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind
 (2 rows)
 
 \c - - - :worker_2_port
-SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r';
+SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' ORDER BY relname;
  relname
 ---------------------------------------------------------------------
 table_to_split_9999001

@@ -128,5 +117,6 @@ SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind
 
 -- Test Cleanup
 \c - postgres - :master_port
+SET client_min_messages TO WARNING;
 DROP SCHEMA "citus_split_shard_by_split_points_deferred_schema" CASCADE;
-NOTICE: drop cascades to table citus_split_shard_by_split_points_deferred_schema.temp_table
+DROP USER test_split_role;

@@ -43,3 +43,8 @@ SELECT count(*) FROM pg_catalog.worker_split_shard_replication_setup(ARRAY[
 1
 (1 row)
 
+-- cleanup, we are done with these manually created test tables
+DROP TABLE table_to_split_1, table_to_split_2, table_to_split_3;
+\c - - - :worker_2_port
+SET search_path TO split_shard_replication_setup_schema;
+DROP TABLE table_to_split_1, table_to_split_2, table_to_split_3;

@@ -17,6 +17,7 @@ test: worker_split_binary_copy_test
 test: worker_split_text_copy_test
 test: citus_split_shard_by_split_points_negative
 test: citus_split_shard_by_split_points
+test: citus_split_shard_by_split_points_deferred_drop
 test: citus_split_shard_by_split_points_failure
 # Name citus_split_shard_by_split_points_columnar_partitioned was too long and being truncated.
 # use citus_split_shard_columnar_partitioned instead.

@@ -256,4 +256,5 @@ SELECT COUNT(*) FROM colocated_dist_table;
 ALTER SYSTEM RESET citus.defer_shard_delete_interval;
 SELECT pg_reload_conf();
 DROP SCHEMA "citus_split_test_schema" CASCADE;
+DROP USER test_split_role;
 --END : Cleanup

@@ -24,8 +24,9 @@ SET citus.next_placement_id TO 8610000;
 SET citus.shard_count TO 2;
 SET citus.shard_replication_factor TO 1;
 SET citus.next_operation_id TO 777;
-SET citus.next_cleanup_record_id TO 11;
+SET citus.next_cleanup_record_id TO 511;
 SET ROLE test_split_role;
+SET search_path TO "citus_split_shard_by_split_points_deferred_schema";
 
 CREATE TABLE table_to_split(id int PRIMARY KEY, int_data int, data text);
 SELECT create_distributed_table('table_to_split', 'id');

@@ -48,29 +49,33 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 ARRAY[:worker_1_node, :worker_2_node],
 'force_logical');
 
--- The original shards are marked for deferred drop with policy_type = 2.
+-- The original shard is marked for deferred drop with policy_type = 2.
+-- The previous shard should be dropped at the beginning of the second split call
 SELECT * from pg_dist_cleanup;
 
--- The physical shards should not be deleted.
+-- One of the physical shards should not be deleted, the other one should.
 \c - - - :worker_1_port
-SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r';
+SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' ORDER BY relname;
 
 \c - - - :worker_2_port
-SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r';
+SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' ORDER BY relname;
 
 -- Perform deferred drop cleanup.
 \c - postgres - :master_port
-CALL citus_cleanup_orphaned_shards();
+CALL citus_cleanup_orphaned_resources();
 
 -- Clenaup has been done.
 SELECT * from pg_dist_cleanup;
 
+-- Verify that the shard to be dropped is dropped
 \c - - - :worker_1_port
-SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r';
+SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' ORDER BY relname;
 
 \c - - - :worker_2_port
-SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r';
+SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' ORDER BY relname;
 
 -- Test Cleanup
 \c - postgres - :master_port
+SET client_min_messages TO WARNING;
 DROP SCHEMA "citus_split_shard_by_split_points_deferred_schema" CASCADE;
+DROP USER test_split_role;

@@ -26,3 +26,10 @@ SELECT count(*) FROM pg_catalog.worker_split_shard_replication_setup(ARRAY[
 ROW(1, 'id', 2, '-2147483648', '-1', :worker_1_node)::pg_catalog.split_shard_info,
 ROW(1, 'id', 3, '0', '2147483647', :worker_1_node)::pg_catalog.split_shard_info
 ], 0);
+
+-- cleanup, we are done with these manually created test tables
+DROP TABLE table_to_split_1, table_to_split_2, table_to_split_3;
+
+\c - - - :worker_2_port
+SET search_path TO split_shard_replication_setup_schema;
+DROP TABLE table_to_split_1, table_to_split_2, table_to_split_3;
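
The ROW(...)::pg_catalog.split_shard_info values passed to worker_split_shard_replication_setup above are positional; judging from the values in this diff they read as (source shard id, distribution column, child shard id, child min hash value, child max hash value, target node id), though that field order is an inference from the values, not something this commit states. An annotated copy of the call under that assumption:

SELECT count(*) FROM pg_catalog.worker_split_shard_replication_setup(ARRAY[
    -- source shard 1, split on column 'id': child shard 2 gets the lower hash range
    ROW(1, 'id', 2, '-2147483648', '-1', :worker_1_node)::pg_catalog.split_shard_info,
    -- child shard 3 gets the upper hash range, on the same target node
    ROW(1, 'id', 3, '0', '2147483647', :worker_1_node)::pg_catalog.split_shard_info
    ], 0);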