From 72d8d2429bb758fc3d7cf960f314bebb6eb1af2a Mon Sep 17 00:00:00 2001
From: Brian Cloutier
Date: Fri, 7 Jul 2017 15:18:37 +0300
Subject: [PATCH] Add a test for upgrading shard placements

---
 .../distributed/citus--7.0-2--7.0-3.sql    | 34 +++++++++----------
 .../regress/expected/multi_703_upgrade.out | 29 ++++++++++++++++
 src/test/regress/multi_schedule            |  1 +
 src/test/regress/sql/multi_703_upgrade.sql | 25 ++++++++++++++
 4 files changed, 71 insertions(+), 18 deletions(-)
 create mode 100644 src/test/regress/expected/multi_703_upgrade.out
 create mode 100644 src/test/regress/sql/multi_703_upgrade.sql

diff --git a/src/backend/distributed/citus--7.0-2--7.0-3.sql b/src/backend/distributed/citus--7.0-2--7.0-3.sql
index b9ba8e16f..a3d921088 100644
--- a/src/backend/distributed/citus--7.0-2--7.0-3.sql
+++ b/src/backend/distributed/citus--7.0-2--7.0-3.sql
@@ -25,12 +25,23 @@ CREATE INDEX pg_dist_placement_shardid_index
 CREATE UNIQUE INDEX pg_dist_placement_placementid_index
 ON pg_dist_placement USING btree(placementid);
 
+CREATE OR REPLACE FUNCTION citus.find_groupid_for_node(text, int)
+RETURNS int AS $$
+DECLARE
+  groupid int := (SELECT groupid FROM pg_dist_node WHERE nodename = $1 AND nodeport = $2);
+BEGIN
+  IF groupid IS NULL THEN
+    RAISE EXCEPTION 'There is no node at "%:%"', $1, $2;
+  ELSE
+    RETURN groupid;
+  END IF;
+END;
+$$ LANGUAGE plpgsql;
+
 INSERT INTO pg_catalog.pg_dist_placement
-SELECT placementid, shardid, shardstate, shardlength, node.groupid
-FROM pg_dist_shard_placement placement LEFT JOIN pg_dist_node node ON (
-  -- use a LEFT JOIN so if the node is missing for some reason we error out
-  placement.nodename = node.nodename AND placement.nodeport = node.nodeport
-);
+SELECT placementid, shardid, shardstate, shardlength,
+  citus.find_groupid_for_node(placement.nodename, placement.nodeport::int) AS groupid
+FROM pg_dist_shard_placement placement;
 
 DROP TRIGGER dist_placement_cache_invalidate ON pg_catalog.pg_dist_shard_placement;
 CREATE TRIGGER dist_placement_cache_invalidate
@@ -58,19 +69,6 @@ GRANT SELECT ON pg_catalog.pg_dist_shard_placement TO public;
 ALTER VIEW pg_catalog.pg_dist_shard_placement
 ALTER placementid SET DEFAULT nextval('pg_dist_placement_placementid_seq');
 
-CREATE OR REPLACE FUNCTION citus.find_groupid_for_node(text, int)
-RETURNS int AS $$
-DECLARE
-  groupid int := (SELECT groupid FROM pg_dist_node WHERE nodename = $1 AND nodeport = $2);
-BEGIN
-  IF groupid IS NULL THEN
-    RAISE EXCEPTION 'There is no node at "%:%"', $1, $2;
-  ELSE
-    RETURN groupid;
-  END IF;
-END;
-$$ LANGUAGE plpgsql;
-
 CREATE OR REPLACE FUNCTION citus.pg_dist_shard_placement_trigger_func()
 RETURNS TRIGGER AS $$
 BEGIN
diff --git a/src/test/regress/expected/multi_703_upgrade.out b/src/test/regress/expected/multi_703_upgrade.out
new file mode 100644
index 000000000..9c5621b91
--- /dev/null
+++ b/src/test/regress/expected/multi_703_upgrade.out
@@ -0,0 +1,29 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103000;
+-- tests that the upgrade from 7.0-2 to 7.0-3 properly migrates shard placements
+DROP EXTENSION citus;
+SET citus.enable_version_checks TO 'false';
+CREATE EXTENSION citus VERSION '7.0-2';
+INSERT INTO pg_dist_shard_placement
+  (placementid, shardid, shardstate, shardlength, nodename, nodeport) VALUES
+  (1, 1, 1, 0, 'localhost', :worker_1_port);
+-- if there are no worker nodes which match the shards, this should fail
+ALTER EXTENSION citus UPDATE TO '7.0-3';
+ERROR: There is no node at "localhost:57637"
+CONTEXT: PL/pgSQL function citus.find_groupid_for_node(text,integer) line 6 at RAISE
+-- if you add a matching worker, the upgrade should succeed
+SELECT master_add_node('localhost', :worker_1_port);
+          master_add_node
+-----------------------------------
+ (1,1,localhost,57637,default,f,t)
+(1 row)
+
+ALTER EXTENSION citus UPDATE TO '7.0-3';
+SELECT * FROM pg_dist_placement;
+ placementid | shardid | shardstate | shardlength | groupid
+-------------+---------+------------+-------------+---------
+           1 |       1 |          1 |           0 |       1
+(1 row)
+
+-- reset and prepare for the rest of the tests
+DROP EXTENSION citus;
+CREATE EXTENSION citus;
diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule
index 8e0e413c1..935e6cefd 100644
--- a/src/test/regress/multi_schedule
+++ b/src/test/regress/multi_schedule
@@ -16,6 +16,7 @@
 # Tests around schema changes, these are run first, so there's no preexisting objects.
 # ---
 test: multi_extension
+test: multi_703_upgrade
 test: multi_cluster_management
 test: multi_test_helpers
 test: multi_table_ddl
diff --git a/src/test/regress/sql/multi_703_upgrade.sql b/src/test/regress/sql/multi_703_upgrade.sql
new file mode 100644
index 000000000..fc2fb8319
--- /dev/null
+++ b/src/test/regress/sql/multi_703_upgrade.sql
@@ -0,0 +1,25 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103000;
+
+-- tests that the upgrade from 7.0-2 to 7.0-3 properly migrates shard placements
+
+DROP EXTENSION citus;
+SET citus.enable_version_checks TO 'false';
+
+CREATE EXTENSION citus VERSION '7.0-2';
+
+INSERT INTO pg_dist_shard_placement
+  (placementid, shardid, shardstate, shardlength, nodename, nodeport) VALUES
+  (1, 1, 1, 0, 'localhost', :worker_1_port);
+
+-- if there are no worker nodes which match the shards, this should fail
+ALTER EXTENSION citus UPDATE TO '7.0-3';
+
+-- if you add a matching worker, the upgrade should succeed
+SELECT master_add_node('localhost', :worker_1_port);
+ALTER EXTENSION citus UPDATE TO '7.0-3';
+
+SELECT * FROM pg_dist_placement;
+
+-- reset and prepare for the rest of the tests
+DROP EXTENSION citus;
+CREATE EXTENSION citus;
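
Note (not part of the patch): since the upgrade now raises an error for any
placement pointing at an unknown node, a cluster can be checked before running
ALTER EXTENSION. A minimal sketch against the catalogs used above
(pg_dist_shard_placement and pg_dist_node); any row it returns is one that
would make citus.find_groupid_for_node raise during the 7.0-2 to 7.0-3
upgrade:

  -- list placements whose (nodename, nodeport) has no matching pg_dist_node row
  SELECT placement.placementid, placement.nodename, placement.nodeport
  FROM pg_dist_shard_placement placement
  LEFT JOIN pg_dist_node node
    ON placement.nodename = node.nodename
   AND placement.nodeport = node.nodeport
  WHERE node.groupid IS NULL;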