mirror of https://github.com/citusdata/citus.git
Fix more tests
parent ef2361f091
commit 5e5a2147cd
@@ -97,6 +97,8 @@ GRANT SELECT ON pg_catalog.pg_dist_cleanup TO public;
 -- start with a higher number.
 CREATE SEQUENCE citus.pg_dist_operationid_seq;
 ALTER SEQUENCE citus.pg_dist_operationid_seq SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.pg_dist_operationid_seq TO public;

 CREATE SEQUENCE citus.pg_dist_cleanup_recordid_seq;
 ALTER SEQUENCE citus.pg_dist_cleanup_recordid_seq SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.pg_dist_cleanup_recordid_seq TO public;
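
The two added GRANT statements make the new sequences readable by every role, mirroring the existing grant on pg_dist_cleanup shown in the hunk header. As a quick sanity check after running the upgrade script, the current role's SELECT privilege can be probed with PostgreSQL's built-in privilege functions; this is a verification sketch, not part of the commit.

-- Both calls should return true for any role once the PUBLIC grants are in place.
SELECT has_sequence_privilege('pg_catalog.pg_dist_operationid_seq', 'SELECT');
SELECT has_sequence_privilege('pg_catalog.pg_dist_cleanup_recordid_seq', 'SELECT');
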
@@ -330,6 +330,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").ca
 SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
 ERROR: canceling statement due to user request
 -- failure on dropping old shard
+-- failure on dropping old colocated shard
+-- Disable deferred drop otherwise we will skip the drop and operation will succeed instead of failing.
+ALTER SYSTEM SET citus.defer_drop_after_shard_split TO false;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
 SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
  mitmproxy
 ---------------------------------------------------------------------
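
The added block of expected output comes from disabling deferred shard drop, so the injected failure on DROP TABLE is hit immediately instead of being skipped. Condensed, the toggle pattern the test relies on looks like the sketch below; the SHOW line is only an illustrative way to confirm the new setting and is not part of the test, and pg_reload_conf() is asynchronous, so the reported value may lag by a moment.

ALTER SYSTEM SET citus.defer_drop_after_shard_split TO false;  -- written to postgresql.auto.conf
SELECT pg_reload_conf();                                       -- ask the server to re-read its configuration
SHOW citus.defer_drop_after_shard_split;                       -- should report "off" once the reload is processed
-- ... run the statement that must observe the immediate shard drop ...
ALTER SYSTEM SET citus.defer_drop_after_shard_split TO true;   -- restore the default for the remaining tests
SELECT pg_reload_conf();
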
@@ -367,6 +376,14 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolatio

 SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
 ERROR: canceling statement due to user request
+-- Re-enable deferred drop for rest of the tests.
+ALTER SYSTEM SET citus.defer_drop_after_shard_split TO true;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
 -- failure on foreign key creation
 SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
  mitmproxy
@@ -19,7 +19,9 @@ WHERE
 oid
 ---------------------------------------------------------------------
 pg_dist_authinfo
-(1 row)
+pg_dist_operationid_seq
+pg_dist_cleanup_recordid_seq
+(3 rows)

 RESET role;
 DROP USER no_access;
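
The expected output above now lists three pg_dist_* objects for the restricted no_access role instead of one; the query itself is truncated in the hunk header, so its exact filter is not visible here. As a purely hypothetical illustration of probing per-object privileges for a role (not the test's query), stock PostgreSQL functions can be used like this:

-- Hypothetical: list pg_catalog relations named pg_dist_* that the role
-- "no_access" (created earlier in this test) may NOT read.
SELECT oid::regclass
FROM pg_class
WHERE relnamespace = 'pg_catalog'::regnamespace
  AND relname LIKE 'pg_dist_%'
  AND NOT has_table_privilege('no_access', oid, 'SELECT')
ORDER BY 1;
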
@@ -232,12 +232,15 @@ ORDER BY 1;
 function worker_split_shard_replication_setup(split_shard_info[])
 schema citus
 schema citus_internal
+sequence pg_dist_cleanup_recordid_seq
 sequence pg_dist_colocationid_seq
 sequence pg_dist_groupid_seq
 sequence pg_dist_node_nodeid_seq
+sequence pg_dist_operationid_seq
 sequence pg_dist_placement_placementid_seq
 sequence pg_dist_shardid_seq
 table pg_dist_authinfo
+table pg_dist_cleanup
 table pg_dist_colocation
 table pg_dist_local_group
 table pg_dist_node
@@ -267,5 +270,5 @@ ORDER BY 1;
 view citus_stat_statements
 view pg_dist_shard_placement
 view time_partitions
-(259 rows)
+(262 rows)

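
The listing of extension-owned objects grows by three entries (the pg_dist_cleanup table and the two sequences), which is why the trailing row count moves from 259 to 262. One way to enumerate the members of the citus extension in the same "<type> <name>" form, sketched here from the standard dependency catalog and not necessarily the exact query behind this output:

-- List every object that belongs to the citus extension.
SELECT pg_describe_object(classid, objid, objsubid) AS object
FROM pg_depend
WHERE refclassid = 'pg_extension'::regclass
  AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'citus')
  AND deptype = 'e'
ORDER BY 1;
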
@@ -158,6 +158,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").ca
 SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');

 -- failure on dropping old shard
+-- failure on dropping old colocated shard
+-- Disable deferred drop otherwise we will skip the drop and operation will succeed instead of failing.
+ALTER SYSTEM SET citus.defer_drop_after_shard_split TO false;
+SELECT pg_reload_conf();
+
 SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
 SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');

@@ -173,6 +178,10 @@ SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode
 SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')');
 SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');

+-- Re-enable deferred drop for rest of the tests.
+ALTER SYSTEM SET citus.defer_drop_after_shard_split TO true;
+SELECT pg_reload_conf();
+
 -- failure on foreign key creation
 SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
 SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
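
These failure tests steer citus.mitmproxy, a helper UDF installed by the failure-testing harness, to either kill the worker connection or cancel the coordinator backend when a matching query is observed. Below is a condensed sketch of the two patterns appearing above; the \gset capture of :pid is how such tests typically obtain the backend pid and is an assumption here, not a line quoted from this file.

-- Assumed setup step: remember our backend pid in the psql variable :pid.
SELECT pg_backend_pid() AS pid \gset
-- Kill the connection as soon as the worker sees the DROP TABLE:
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
-- Or send a cancel signal to our own backend instead:
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')');
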
@@ -8,6 +8,8 @@ SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg
 SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node;
 SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node;
 SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation;
+SELECT nextval('pg_dist_operationid_seq') = MAX(operation_id)+1 FROM pg_dist_node;
+SELECT nextval('pg_dist_cleanup_recordid_seq') = MAX(record_id)+1 FROM pg_dist_colocation;

 -- If this query gives output it means we've added a new sequence that should
 -- possibly be restored after upgrades.
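
Each of these checks asserts that, after an upgrade, a sequence resumes just past the largest identifier already stored in the corresponding catalog table. The same idiom written against the cleanup metadata itself would look like the sketch below; it assumes pg_dist_cleanup exposes record_id and operation_id columns and adds a COALESCE guard for the empty-table case, so it is an illustration rather than a line from the test.

-- Sketch: sequence should be positioned just past the existing rows, tolerating an empty table.
SELECT nextval('pg_dist_cleanup_recordid_seq') = COALESCE(MAX(record_id), 0) + 1 FROM pg_dist_cleanup;
SELECT nextval('pg_dist_operationid_seq') = COALESCE(MAX(operation_id), 0) + 1 FROM pg_dist_cleanup;
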
@@ -19,7 +21,9 @@ SELECT sequence_name FROM information_schema.sequences
 'pg_dist_placement_placementid_seq',
 'pg_dist_groupid_seq',
 'pg_dist_node_nodeid_seq',
-'pg_dist_colocationid_seq'
+'pg_dist_colocationid_seq',
+'pg_dist_operationid_seq',
+'pg_dist_cleanup_recordid_seq'
 );

 SELECT logicalrelid FROM pg_dist_partition
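
This allow-list exists so that the guard query only reports sequences nobody has decided about: per the comment in the previous hunk, any output means a new sequence may need to be restored after upgrades, so both new sequences are appended here. Independently of that test, their presence after an upgrade can be confirmed straight from the catalog; a small sketch, not taken from the test file:

-- Should return both new sequences once the extension is upgraded.
SELECT relname
FROM pg_class
WHERE relkind = 'S'
  AND relnamespace = 'pg_catalog'::regnamespace
  AND relname IN ('pg_dist_operationid_seq', 'pg_dist_cleanup_recordid_seq')
ORDER BY 1;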