citus/src/test/regress/expected/replicated_table_disable_no...

CREATE SCHEMA disable_node_with_replicated_tables;
SET search_path TO disable_node_with_replicated_tables;
SET citus.next_shard_id TO 101500;
SET citus.shard_replication_factor TO 2;
CREATE TABLE replicated(a int, b int);
SELECT create_distributed_table('replicated', 'a', shard_count:=2);
create_distributed_table
---------------------------------------------------------------------

(1 row)
CREATE TABLE ref (a int, b int);
SELECT create_reference_table('ref');
create_reference_table
---------------------------------------------------------------------

(1 row)
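-- With shard_count 2 and shard_replication_factor 2, 'replicated' gets two
-- shards with two placements each, and 'ref' gets one shard placed on every
-- node. An illustrative way to inspect the initial placements (not part of
-- the original test, so its output is not shown here):
SELECT s.logicalrelid, s.shardid, count(p.placementid) AS placements
FROM pg_dist_shard s JOIN pg_dist_placement p USING (shardid)
WHERE s.logicalrelid IN ('replicated'::regclass, 'ref'::regclass)
GROUP BY s.logicalrelid, s.shardid ORDER BY s.shardid;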
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- should successfully disable the node
SELECT citus_disable_node('localhost', :worker_2_port, true);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
citus_disable_node
---------------------------------------------------------------------

(1 row)
SELECT public.wait_until_metadata_sync();
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)
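-- After the metadata sync, the disabled node should be marked inactive in the
-- node metadata. An illustrative check (not part of the original test, output
-- not shown):
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodeport;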
-- the placements on the disabled node should be removed both from the
-- coordinator's metadata and from the workers' metadata
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);
count
---------------------------------------------------------------------
3
(1 row)
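-- To see which node holds the surviving placements, join the placement
-- metadata to the node metadata on groupid (illustrative only, output not
-- shown):
SELECT p.shardid, n.nodename, n.nodeport
FROM pg_dist_placement p JOIN pg_dist_node n USING (groupid)
WHERE p.shardid IN (101500, 101501, 101502) ORDER BY p.shardid;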
\c - - - :worker_1_port
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);
count
---------------------------------------------------------------------
3
(1 row)
SET search_path TO disable_node_with_replicated_tables;
-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
\c - - - :master_port
SET search_path TO disable_node_with_replicated_tables;
-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- now, query with the round-robin policy so that
-- consecutive queries hit different replicas
SET citus.task_assignment_policy TO "round-robin";
SELECT count(*) FROM ref;
count
---------------------------------------------------------------------
33
(1 row)
SELECT count(*) FROM ref;
count
---------------------------------------------------------------------
33
(1 row)
SELECT count(*) FROM replicated;
count
---------------------------------------------------------------------
33
(1 row)
SELECT count(*) FROM replicated;
count
---------------------------------------------------------------------
33
(1 row)
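-- While the node is disabled, round-robin assignment has only one healthy
-- placement to choose from, so every task lands on worker 1; once both
-- replicas are active again, consecutive queries alternate between them.
-- EXPLAIN shows the node each task is routed to. Illustrative only, and left
-- commented out because an extra query would advance the round-robin counter
-- and shift the assignments below:
-- EXPLAIN (COSTS OFF) SELECT count(*) FROM ref;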
-- now, we should be able to replicate the shards back
SET client_min_messages TO ERROR;
SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT 1 FROM replicate_table_shards('replicated', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
?column?
---------------------------------------------------------------------
1
(1 row)
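-- After replicate_table_shards, every shard of 'replicated' should be back to
-- two placements. An illustrative verification (not part of the original
-- test, output not shown):
SELECT shardid, count(*) AS placements
FROM pg_dist_placement
WHERE shardid IN (101500, 101501)
GROUP BY shardid ORDER BY shardid;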
RESET client_min_messages;
-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- now, query with the round-robin policy so that
-- consecutive queries hit different replicas
SET citus.task_assignment_policy TO "round-robin";
SELECT count(*) FROM ref;
count
---------------------------------------------------------------------
44
(1 row)
SELECT count(*) FROM ref;
count
---------------------------------------------------------------------
44
(1 row)
SELECT count(*) FROM replicated;
count
---------------------------------------------------------------------
44
(1 row)
SELECT count(*) FROM replicated;
count
---------------------------------------------------------------------
44
(1 row)
SET client_min_messages TO ERROR;
DROP SCHEMA disable_node_with_replicated_tables CASCADE;