citus/src/test/regress/expected/single_node_enterprise.out

-- we already have lots of tests targeting
-- single node citus clusters in sql/single_node.sql
-- in this file, we are testing enterprise features
CREATE SCHEMA single_node_ent;
SET search_path TO single_node_ent;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 90730500;
-- idempotently add node to allow this test to run without add_coordinator
SET client_min_messages TO WARNING;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
?column?
---------------------------------------------------------------------
1
(1 row)
CREATE USER full_access_single_node;
CREATE USER read_access_single_node;
CREATE USER no_access_single_node;
CREATE TYPE new_type AS (n int, m text);
CREATE TABLE test(x int, y int, z new_type);
SELECT create_distributed_table('test','x');
create_distributed_table
---------------------------------------------------------------------

(1 row)
CREATE TABLE ref(a int, b int);
SELECT create_reference_table('ref');
create_reference_table
---------------------------------------------------------------------

(1 row)
-- we want to test replicate_table_shards()
-- which requires statement-based shard replication
CREATE TABLE statement_replicated(a int PRIMARY KEY);
SELECT create_distributed_table('statement_replicated','a', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='statement_replicated'::regclass;
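-- repmodel 'c' marks the table as statement-replicated, which is
-- what replicate_table_shards() requires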
-- We create this function to make sure
-- GRANT ALL ON ALL FUNCTIONS IN SCHEMA doesn't get stuck.
CREATE FUNCTION notice(text)
RETURNS void
LANGUAGE plpgsql AS $$
BEGIN
RAISE NOTICE '%', $1;
END;
$$;
SELECT create_distributed_function('notice(text)');
create_distributed_function
---------------------------------------------------------------------

(1 row)
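-- distributing the function records it in the Citus metadata, so it
-- will be propagated to any node added later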
-- allow access to various users
GRANT ALL ON TABLE test,ref TO full_access_single_node;
GRANT USAGE ON SCHEMA single_node_ent TO full_access_single_node;
ALTER ROLE full_access_single_node WITH LOGIN;
GRANT ALL PRIVILEGES ON DATABASE postgres TO no_access_single_node;
REVOKE ALL PRIVILEGES ON DATABASE postgres FROM no_access_single_node;
REVOKE USAGE, CREATE ON SCHEMA single_node_ent FROM no_access_single_node;
GRANT SELECT ON ref,test TO no_access_single_node;
REVOKE SELECT ON ref,test FROM no_access_single_node;
-- we have to use local execution, otherwise we hit a known issue
-- (see https://github.com/citusdata/citus-enterprise/issues/474)
-- to force local execution, use a transaction block
BEGIN;
GRANT USAGE ON SCHEMA single_node_ent TO read_access_single_node;
GRANT SELECT ON ALL TABLES IN SCHEMA single_node_ent TO read_access_single_node;
COMMIT;
-- revoke SELECT access for the next 3-4 tests
REVOKE SELECT ON test FROM read_access_single_node;
-- Make sure the access is revoked
SET ROLE read_access_single_node;
SELECT COUNT(*) FROM test;
ERROR: permission denied for table test
SET ROLE postgres;
BEGIN;
GRANT SELECT ON ALL TABLES IN SCHEMA single_node_ent TO read_access_single_node;
-- Make sure we can now read as read_access_single_node role
SET ROLE read_access_single_node;
SELECT COUNT(*) FROM test;
count
---------------------------------------------------------------------
0
(1 row)
SET ROLE postgres;
-- Make sure REVOKE .. IN SCHEMA also works
REVOKE SELECT ON ALL TABLES IN SCHEMA single_node_ent FROM read_access_single_node;
SET ROLE read_access_single_node;
SELECT COUNT(*) FROM test;
ERROR: permission denied for table test
ROLLBACK;
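-- the ROLLBACK discarded the in-transaction grants, so grant the
-- privileges again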
GRANT ALL ON ALL FUNCTIONS IN SCHEMA single_node_ent to full_access_single_node;
GRANT SELECT ON ALL TABLES IN SCHEMA single_node_ent TO read_access_single_node;
GRANT SELECT ON test, ref TO read_access_single_node;
SET ROLE read_access_single_node;
-- Make sure we can now read as read_access_single_node role
SELECT COUNT(*) FROM test;
count
---------------------------------------------------------------------
0
(1 row)
SET ROLE full_access_single_node;
INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type);
-- should fail as only read access is allowed
SET ROLE read_access_single_node;
INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type);
ERROR: permission denied for table test
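-- store the coordinator's node id in a psql variable via \gset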
SELECT nodeid AS coordinator_node_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :master_port
\gset
-- pg_dist_poolinfo should work fine for the coordinator
-- start with outright bad values
SET ROLE postgres;
INSERT INTO pg_dist_poolinfo VALUES (:coordinator_node_id, 'host=failhost');
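-- Citus now uses the poolinfo connection string when connecting to
-- the coordinator, so remote execution should fail to connect;
-- reconnect so that no cached connections are reused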
\c
SET search_path TO single_node_ent;
\set VERBOSITY terse
-- suppress the OS-specific connection error message by catching it
-- and re-raising a stable one
DO $$
BEGIN
    BEGIN
        -- we want to force remote execution
        SET LOCAL citus.enable_local_execution TO false;
        SET LOCAL client_min_messages TO ERROR;
        SELECT COUNT(*) FROM test;
    EXCEPTION WHEN OTHERS THEN
        IF SQLERRM LIKE 'connection to the remote node%%' THEN
            RAISE 'failed to execute select';
        END IF;
    END;
END;
$$;
ERROR: failed to execute select
TRUNCATE pg_dist_poolinfo;
-- using 127.0.0.1 should work fine
INSERT INTO pg_dist_poolinfo VALUES (:coordinator_node_id, 'host=127.0.0.1 port=' || :master_port);
\c
SET search_path TO single_node_ent;
SET citus.log_remote_commands TO ON;
SET client_min_messages TO DEBUG1;
-- force a multi-shard query so that
-- remote connections are opened
SELECT COUNT(*) FROM test WHERE x = 1 OR x = 2;
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730500 test WHERE ((x OPERATOR(pg_catalog.=) 1) OR (x OPERATOR(pg_catalog.=) 2))
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730503 test WHERE ((x OPERATOR(pg_catalog.=) 1) OR (x OPERATOR(pg_catalog.=) 2))
count
---------------------------------------------------------------------
1
(1 row)
RESET citus.log_remote_commands;
RESET client_min_messages;
TRUNCATE pg_dist_poolinfo;
-- reconnect
\c
SET search_path TO single_node_ent;
-- now, create a colocated table
-- add a new node, and move the
-- shards to the new node
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 90731500;
CREATE TABLE colocated_table (x int PRIMARY KEY, y int);
SELECT create_distributed_table('colocated_table','x', colocate_with:='single_node_ent.test');
create_distributed_table
---------------------------------------------------------------------

(1 row)
-- create some foreign keys
TRUNCATE test, ref;
ALTER TABLE test ADD CONSTRAINT p_key PRIMARY KEY(x);
ALTER TABLE ref ADD CONSTRAINT p_key_2 PRIMARY KEY(a);
ALTER TABLE colocated_table ADD CONSTRAINT fkey FOREIGN KEY (x) REFERENCES test(x);
ALTER TABLE test ADD CONSTRAINT fkey FOREIGN KEY (x) REFERENCES ref(a);
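-- note: foreign keys between distributed tables require colocation on
-- the distribution key; foreign keys to reference tables are allowed
-- from any distributed table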
-- load some data
INSERT INTO ref SELECT i, i*2 FROM generate_series(0,50)i;
INSERT INTO test SELECT i, i*2, (i, 'citus' || i)::new_type FROM generate_series(0,50)i;
INSERT INTO colocated_table SELECT i, i*2 FROM generate_series(0,50)i;
-- run a very basic query
SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a);
count
---------------------------------------------------------------------
51
(1 row)
CREATE VIEW view_created_before_shard_moves AS
SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a);
SELECT * FROM view_created_before_shard_moves;
count
---------------------------------------------------------------------
51
(1 row)
-- show that tenant isolation works fine
SELECT isolate_tenant_to_new_shard('test', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
isolate_tenant_to_new_shard
---------------------------------------------------------------------
90731505
(1 row)
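-- the tenant with distribution key value 5 now has its own shard;
-- CASCADE splits the colocated tables the same way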
-- in the first iteration, have a
-- hybrid cluster, meaning that
-- the shards exist on both the coordinator
-- and on the workers
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
NOTICE: shards are still on the coordinator after adding the new node
?column?
---------------------------------------------------------------------
1
(1 row)
-- make sure that we can replicate tables as well
select replicate_table_shards('statement_replicated', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
replicate_table_shards
---------------------------------------------------------------------

(1 row)
-- we don't need the table anymore; it complicates the output of the rebalances below
DROP TABLE statement_replicated;
-- move one set of colocated shards in non-blocking mode
-- and the other with block_writes
SELECT rebalance_table_shards(max_shard_moves:=1);
NOTICE: Stopped searching before we were out of moves. Please rerun the rebalancer after it's finished for a more optimal placement.
NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
rebalance_table_shards
---------------------------------------------------------------------

(1 row)
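-- max_shard_moves:=1 stops the rebalancer after a single move, hence
-- the NOTICE above; the remaining moves happen below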
SELECT rebalance_table_shards(shard_transfer_mode:='block_writes');
NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
rebalance_table_shards
---------------------------------------------------------------------

(1 row)
-- should fail as only read access is allowed
SET ROLE read_access_single_node;
INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type);
ERROR: permission denied for table test
SET ROLE postgres;
\c
SET search_path TO single_node_ent;
-- the same query should work
SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a);
count
---------------------------------------------------------------------
51
(1 row)
-- make sure that the composite type is created
-- on the worker
SELECT * FROM test ORDER BY 1 DESC, 2, 3 LIMIT 1;
x | y | z
---------------------------------------------------------------------
50 | 100 | (50,citus50)
(1 row)
-- make sure that we can execute with intermediate
-- results that are needed on all shards on the
-- final step
WITH cte_1 AS (SELECT * FROM test ORDER BY 1 DESC, 2, 3 LIMIT 5)
SELECT count(*) FROM colocated_table JOIN cte_1 USING (x);
count
---------------------------------------------------------------------
5
(1 row)
-- make sure that we can still query the view
SELECT * FROM view_created_before_shard_moves;
count
---------------------------------------------------------------------
51
(1 row)
-- we should be able to switch the cluster to CitusMX
SELECT start_metadata_sync_to_node('localhost', :master_port);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)
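-- with the metadata synced, the worker can also coordinate queries (CitusMX)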
-- sanity-check: the same queries should work
SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a);
count
---------------------------------------------------------------------
51
(1 row)
SELECT * FROM test ORDER BY 1 DESC,2,3 LIMIT 1;
x | y | z
---------------------------------------------------------------------
50 | 100 | (50,citus50)
(1 row)
WITH cte_1 AS (SELECT * FROM test ORDER BY 1 DESC,2,3 LIMIT 5)
SELECT count(*) FROM colocated_table JOIN cte_1 USING (x);
count
---------------------------------------------------------------------
5
(1 row)
-- all DDLs should work
ALTER TABLE colocated_table ADD COLUMN z single_node_ent.new_type;
UPDATE colocated_table SET z = (x, y::text)::new_type;
SELECT * FROM colocated_table ORDER BY 1 DESC,2,3 LIMIT 1;
x | y | z
---------------------------------------------------------------------
50 | 100 | (50,100)
(1 row)
CREATE INDEX i_colocated_table ON colocated_table(y);
BEGIN;
CREATE INDEX i_colocated_table_2 ON colocated_table(x,y);
ROLLBACK;
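-- note: index DDL inside a transaction block can be rolled back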
-- sanity check: transaction blocks spanning both nodes should work fine
BEGIN;
UPDATE colocated_table SET y = y + 1;
UPDATE test SET y = y -1;
SELECT max(y) FROM colocated_table;
max
---------------------------------------------------------------------
101
(1 row)
SELECT max(y) FROM test;
max
---------------------------------------------------------------------
99
(1 row)
ROLLBACK;
-- generate data so that we can enforce fkeys
INSERT INTO ref SELECT i, i*2 FROM generate_series(100,150)i;
-- the first insert goes to a shard on the worker
-- the second insert goes to a shard on the coordinator
BEGIN;
SET LOCAL citus.log_remote_commands TO ON;
INSERT INTO test(x,y) VALUES (101,100);
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
NOTICE: issuing INSERT INTO single_node_ent.test_90730501 (x, y) VALUES (101, 100)
INSERT INTO test(x,y) VALUES (102,100);
NOTICE: issuing INSERT INTO single_node_ent.test_90730502 (x, y) VALUES (102, 100)
-- followed by a multi-shard command
SELECT count(*) FROM test;
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730501 test WHERE true
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730502 test WHERE true
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730503 test WHERE true
NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731504 test WHERE true
NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731505 test WHERE true
NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731506 test WHERE true
count
---------------------------------------------------------------------
53
(1 row)
ROLLBACK;
NOTICE: issuing ROLLBACK
-- the first insert goes to a shard on the coordinator
-- the second insert goes to a shard on the worker
BEGIN;
SET LOCAL citus.log_remote_commands TO ON;
INSERT INTO test(x,y) VALUES (102,100);
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
NOTICE: issuing INSERT INTO single_node_ent.test_90730502 (x, y) VALUES (102, 100)
INSERT INTO test(x,y) VALUES (101,100);
NOTICE: issuing INSERT INTO single_node_ent.test_90730501 (x, y) VALUES (101, 100)
-- followed by a multi-shard command
SELECT count(*) FROM test;
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730501 test WHERE true
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730502 test WHERE true
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730503 test WHERE true
NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731504 test WHERE true
NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731505 test WHERE true
NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731506 test WHERE true
count
---------------------------------------------------------------------
53
(1 row)
ROLLBACK;
NOTICE: issuing ROLLBACK
-- now, let's move all the shards of distributed tables out of the coordinator
-- block_writes is much faster, so we prefer it for the sake of the test timings
SELECT master_drain_node('localhost', :master_port, shard_transfer_mode:='block_writes');
NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
master_drain_node
---------------------------------------------------------------------

(1 row)
-- should return false as master_drain_node has just set it to false for the coordinator
SELECT shouldhaveshards FROM pg_dist_node WHERE nodeport = :master_port;
shouldhaveshards
---------------------------------------------------------------------
f
(1 row)
-- sanity-check: the same queries should work
SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a);
count
---------------------------------------------------------------------
51
(1 row)
SELECT * FROM test ORDER BY 1 DESC,2 ,3 LIMIT 1;
x | y | z
---------------------------------------------------------------------
50 | 100 | (50,citus50)
(1 row)
WITH cte_1 AS (SELECT * FROM test ORDER BY 1 DESC,2 , 3 LIMIT 5)
SELECT count(*) FROM colocated_table JOIN cte_1 USING (x);
count
---------------------------------------------------------------------
5
(1 row)
-- make sure that we can still query the view
SELECT * FROM view_created_before_shard_moves;
count
---------------------------------------------------------------------
51
(1 row)
-- and make sure that all the shards are remote
BEGIN;
SET LOCAL citus.log_remote_commands TO ON;
INSERT INTO test(x,y) VALUES (101,100);
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
NOTICE: issuing INSERT INTO single_node_ent.test_90730501 (x, y) VALUES (101, 100)
INSERT INTO test(x,y) VALUES (102,100);
NOTICE: issuing INSERT INTO single_node_ent.test_90730502 (x, y) VALUES (102, 100)
-- followed by a multi-shard command
SELECT count(*) FROM test;
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730501 test WHERE true
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730502 test WHERE true
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730503 test WHERE true
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90731504 test WHERE true
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90731505 test WHERE true
NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90731506 test WHERE true
count
---------------------------------------------------------------------
53
(1 row)
ROLLBACK;
NOTICE: issuing ROLLBACK
-- should fail as only read access is allowed
SET ROLE read_access_single_node;
INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type);
ERROR: permission denied for table test
SET ROLE postgres;
\c
SET search_path TO single_node_ent;
-- Cleanup
RESET citus.log_remote_commands;
SET client_min_messages TO WARNING;
DROP SCHEMA single_node_ent CASCADE;
DROP OWNED BY full_access_single_node;
DROP OWNED BY read_access_single_node;
DROP ROLE full_access_single_node;
DROP ROLE read_access_single_node;
-- remove the nodes for next tests
SELECT 1 FROM master_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT 1 FROM master_remove_node('localhost', :worker_1_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- restart nodeid sequence so that multi_cluster_management still has the same
-- nodeids
ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 1;
ALTER SEQUENCE pg_dist_groupid_seq RESTART 1;