Single Shard Misc UDFs 2 (#6963)

This is a second PR, split out to make reviewing easier.
This PR tests the following UDFs on single shard tables (see the sketch after the list):
- replicate_reference_tables
- fix_partition_shard_index_names
- isolate_tenant_to_new_shard
- replicate_table_shards
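
All of these are exercised against single shard tables, i.e. distributed tables created with a NULL distribution column. A minimal sketch of the pattern the new tests follow (the table name is illustrative; the errors are the ones asserted in the expected output below):

CREATE TABLE single_shard_tbl (a INT);
SELECT create_distributed_table('single_shard_tbl', NULL, colocate_with:='none');
-- shard-copying UDFs that only make sense for hash distributed tables are expected to error out
SELECT replicate_table_shards('single_shard_tbl');
-- ERROR:  cannot replicate single shard tables' shards
SELECT isolate_tenant_to_new_shard('single_shard_tbl', 5);
-- ERROR:  cannot isolate tenant because tenant isolation is only support for hash distributed tables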
Halil Ozan Akgül 2023-06-02 13:46:14 +03:00 committed by GitHub
parent ac7f732be2
commit 3e183746b7
3 changed files with 189 additions and 2 deletions

@@ -1178,6 +1178,11 @@ replicate_table_shards(PG_FUNCTION_ARGS)
	ArrayType *excludedShardArray = PG_GETARG_ARRAYTYPE_P(3);
	Oid shardReplicationModeOid = PG_GETARG_OID(4);
+	if (IsCitusTableType(relationId, SINGLE_SHARD_DISTRIBUTED))
+	{
+		ereport(ERROR, (errmsg("cannot replicate single shard tables' shards")));
+	}
	char transferMode = LookupShardTransferMode(shardReplicationModeOid);
	EnsureReferenceTablesExistOnAllNodesExtended(transferMode);
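
The new check above is what makes replicate_table_shards() reject single shard tables; the same error string is asserted in the regression test further down. The expected output below also covers replicate_reference_tables on a cluster that contains a single shard table: a worker is removed, a reference table (plus a single shard table with a foreign key to it) is created, the worker is added back, and the UDF copies the missing reference table placement. A condensed sketch of that flow, using the object names from the test:

SELECT citus_remove_node('localhost', :worker_2_port);
CREATE TABLE rep_ref (a INT UNIQUE);
SELECT create_reference_table('rep_ref');
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
-- rep_ref has no placement on worker 2 yet; copy it while blocking writes
SELECT replicate_reference_tables('block_writes');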

@@ -397,7 +397,7 @@ SELECT shardid, nodeport FROM pg_dist_shard_placement WHERE shardid > 1820000 OR
(7 rows)
-- verify we didn't break any colocations
-SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::text LIKE '%single_shard_table_col%' ORDER BY colocationid;
+SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::text LIKE '%single_shard_table_col%' ORDER BY colocationid, logicalrelid;
logicalrelid | colocationid
---------------------------------------------------------------------
single_shard_table_col1_1 | 198001
@@ -952,5 +952,121 @@ SELECT is_citus_depended_object('pg_class'::regclass, 'citus_dep_tbl'::regclass)
t
(1 row)
RESET citus.hide_citus_dependent_objects;
-- test replicate_reference_tables
SET client_min_messages TO WARNING;
DROP SCHEMA null_dist_key_udfs CASCADE;
RESET client_min_messages;
CREATE SCHEMA null_dist_key_udfs;
SET search_path TO null_dist_key_udfs;
SELECT citus_remove_node('localhost', :worker_2_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
CREATE TABLE rep_ref (a INT UNIQUE);
SELECT create_reference_table('rep_ref');
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE rep_sing (a INT);
SELECT create_distributed_table('rep_sing', NULL, colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
ALTER TABLE rep_sing ADD CONSTRAINT rep_fkey FOREIGN KEY (a) REFERENCES rep_ref(a);
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT count(*) FROM citus_shards WHERE table_name = 'rep_ref'::regclass AND nodeport = :worker_2_port;
count
---------------------------------------------------------------------
0
(1 row)
SELECT replicate_reference_tables('block_writes');
replicate_reference_tables
---------------------------------------------------------------------
(1 row)
SELECT count(*) FROM citus_shards WHERE table_name = 'rep_ref'::regclass AND nodeport = :worker_2_port;
count
---------------------------------------------------------------------
1
(1 row)
-- test fix_partition_shard_index_names
SET citus.next_shard_id TO 3820000;
CREATE TABLE part_tbl_sing (dist_col int, another_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
SELECT create_distributed_table('part_tbl_sing', NULL, colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- create a partition with a long name and another with a short name
CREATE TABLE partition_table_with_very_long_name PARTITION OF part_tbl_sing FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
CREATE TABLE p PARTITION OF part_tbl_sing FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
-- create an index on parent table
-- we will see that it doesn't matter whether we name the index on parent or not
-- indexes auto-generated on partitions will not use this name
-- SELECT fix_partition_shard_index_names('dist_partitioned_table') will be executed
-- automatically at the end of the CREATE INDEX command
CREATE INDEX short ON part_tbl_sing USING btree (another_col, partition_col);
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'null_dist_key_udfs' AND tablename SIMILAR TO 'p%' ORDER BY 1, 2;
tablename | indexname
---------------------------------------------------------------------
p | p_another_col_partition_col_idx
part_tbl_sing | short
partition_table_with_very_long_name | partition_table_with_very_long_na_another_col_partition_col_idx
(3 rows)
SELECT nodeport AS part_tbl_sing_port
FROM citus_shards
WHERE table_name = 'part_tbl_sing'::regclass AND
nodeport IN (:worker_1_port, :worker_2_port) \gset
\c - - - :part_tbl_sing_port
-- the names are generated correctly
-- shard id has been appended to all index names which didn't end in shard id
-- this goes in line with Citus's way of naming indexes of shards: always append shardid to the end
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'null_dist_key_udfs' AND tablename SIMILAR TO 'p%\_\d*' ORDER BY 1, 2;
tablename | indexname
---------------------------------------------------------------------
p_3820002 | p_another_col_partition_col_idx_3820002
part_tbl_sing_3820000 | short_3820000
partition_table_with_very_long_name_3820001 | partition_table_with_very_long_na_another_col__dd884a3b_3820001
(3 rows)
\c - - - :master_port
SET search_path TO null_dist_key_udfs;
--test isolate_tenant_to_new_shard
CREATE TABLE iso_tbl (a INT);
SELECT create_distributed_table('iso_tbl', NULL, colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('iso_tbl', 5);
ERROR: cannot isolate tenant because tenant isolation is only support for hash distributed tables
-- test replicate_table_shards
CREATE TABLE rep_tbl (a INT);
SELECT create_distributed_table('rep_tbl', NULL, colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT replicate_table_shards('rep_tbl');
ERROR: cannot replicate single shard tables' shards
SET client_min_messages TO WARNING;
DROP SCHEMA null_dist_key_udfs CASCADE;
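
A note on the fix_partition_shard_index_names output above: Citus names shard-level indexes by appending the shard id, and index names that would exceed PostgreSQL's 63-byte identifier limit are truncated and get a short hash before the shard id (partition_table_with_very_long_na_another_col__dd884a3b_3820001 in the test). For partitioned tables this renaming runs automatically at the end of CREATE INDEX, as the test comments note, but the UDF can also be called manually on the parent table; a sketch against the table used in the test:

SET search_path TO null_dist_key_udfs;
SELECT fix_partition_shard_index_names('part_tbl_sing'::regclass);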

@@ -173,7 +173,7 @@ SELECT rebalance_table_shards();
SELECT shardid, nodeport FROM pg_dist_shard_placement WHERE shardid > 1820000 ORDER BY shardid;
-- verify we didn't break any colocations
-SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::text LIKE '%single_shard_table_col%' ORDER BY colocationid;
+SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::text LIKE '%single_shard_table_col%' ORDER BY colocationid, logicalrelid;
-- drop preexisting tables
-- we can remove the drop commands once the issue is fixed: https://github.com/citusdata/citus/issues/6948
@@ -436,6 +436,72 @@ CREATE TABLE citus_dep_tbl (a noderole);
SELECT create_distributed_table('citus_dep_tbl', NULL, colocate_with:='none');
SELECT is_citus_depended_object('pg_class'::regclass, 'citus_dep_tbl'::regclass);
RESET citus.hide_citus_dependent_objects;
-- test replicate_reference_tables
SET client_min_messages TO WARNING;
DROP SCHEMA null_dist_key_udfs CASCADE;
RESET client_min_messages;
CREATE SCHEMA null_dist_key_udfs;
SET search_path TO null_dist_key_udfs;
SELECT citus_remove_node('localhost', :worker_2_port);
CREATE TABLE rep_ref (a INT UNIQUE);
SELECT create_reference_table('rep_ref');
CREATE TABLE rep_sing (a INT);
SELECT create_distributed_table('rep_sing', NULL, colocate_with:='none');
ALTER TABLE rep_sing ADD CONSTRAINT rep_fkey FOREIGN KEY (a) REFERENCES rep_ref(a);
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
SELECT count(*) FROM citus_shards WHERE table_name = 'rep_ref'::regclass AND nodeport = :worker_2_port;
SELECT replicate_reference_tables('block_writes');
SELECT count(*) FROM citus_shards WHERE table_name = 'rep_ref'::regclass AND nodeport = :worker_2_port;
-- test fix_partition_shard_index_names
SET citus.next_shard_id TO 3820000;
CREATE TABLE part_tbl_sing (dist_col int, another_col int, partition_col timestamp) PARTITION BY RANGE (partition_col);
SELECT create_distributed_table('part_tbl_sing', NULL, colocate_with:='none');
-- create a partition with a long name and another with a short name
CREATE TABLE partition_table_with_very_long_name PARTITION OF part_tbl_sing FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
CREATE TABLE p PARTITION OF part_tbl_sing FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
-- create an index on parent table
-- we will see that it doesn't matter whether we name the index on parent or not
-- indexes auto-generated on partitions will not use this name
-- SELECT fix_partition_shard_index_names('dist_partitioned_table') will be executed
-- automatically at the end of the CREATE INDEX command
CREATE INDEX short ON part_tbl_sing USING btree (another_col, partition_col);
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'null_dist_key_udfs' AND tablename SIMILAR TO 'p%' ORDER BY 1, 2;
SELECT nodeport AS part_tbl_sing_port
FROM citus_shards
WHERE table_name = 'part_tbl_sing'::regclass AND
nodeport IN (:worker_1_port, :worker_2_port) \gset
\c - - - :part_tbl_sing_port
-- the names are generated correctly
-- shard id has been appended to all index names which didn't end in shard id
-- this goes in line with Citus's way of naming indexes of shards: always append shardid to the end
SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'null_dist_key_udfs' AND tablename SIMILAR TO 'p%\_\d*' ORDER BY 1, 2;
\c - - - :master_port
SET search_path TO null_dist_key_udfs;
--test isolate_tenant_to_new_shard
CREATE TABLE iso_tbl (a INT);
SELECT create_distributed_table('iso_tbl', NULL, colocate_with:='none');
SELECT isolate_tenant_to_new_shard('iso_tbl', 5);
-- test replicate_table_shards
CREATE TABLE rep_tbl (a INT);
SELECT create_distributed_table('rep_tbl', NULL, colocate_with:='none');
SELECT replicate_table_shards('rep_tbl');
SET client_min_messages TO WARNING;
DROP SCHEMA null_dist_key_udfs CASCADE;