Test name needing to be quoted

pull/6029/head
Nitish Upreti 2022-07-08 21:44:35 -07:00
parent a515a49f4c
commit 7ef8ea948d
6 changed files with 196 additions and 49 deletions

@@ -197,7 +197,7 @@ CreateShardCopyDestReceivers(EState *estate, ShardInterval *shardIntervalToSplit
char *sourceShardNamePrefix = get_rel_name(shardIntervalToSplitCopy->relationId);
foreach_ptr(splitCopyInfo, splitCopyInfoList)
{
-char *destinationShardSchemaOid = get_rel_namespace(
+Oid destinationShardSchemaOid = get_rel_namespace(
shardIntervalToSplitCopy->relationId);
char *destinationShardSchemaName = get_namespace_name(destinationShardSchemaOid);
char *destinationShardNameCopy = pstrdup(sourceShardNamePrefix);
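For context on the one-line change above: get_rel_namespace() returns the namespace Oid of a relation rather than a string, so the earlier char * declaration was a type mismatch; the Oid is then resolved to a schema name with get_namespace_name(). A minimal sketch of the corrected call chain (both helpers come from PostgreSQL's utils/lsyscache.h):

/* get_rel_namespace() yields the schema's Oid; get_namespace_name() maps that Oid to its name. */
Oid destinationShardSchemaOid = get_rel_namespace(shardIntervalToSplitCopy->relationId);
char *destinationShardSchemaName = get_namespace_name(destinationShardSchemaOid);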

@@ -74,21 +74,21 @@ INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FR
-- END: Load data into tables.
-- BEGIN : Display current state.
-- TODO(niupre): Can we refactor this to be a function?
-SELECT cls.relnamespace, shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
- relnamespace | shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport | placementid
+ shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport | placementid
---------------------------------------------------------------------
- 18296 | 8981000 | sensors | -2147483648 | -1 | localhost | 57637 | 8610000
- 18296 | 8981001 | sensors | 0 | 2147483647 | localhost | 57638 | 8610001
- 18296 | 8981003 | colocated_dist_table | -2147483648 | -1 | localhost | 57637 | 8610004
- 18296 | 8981004 | colocated_dist_table | 0 | 2147483647 | localhost | 57638 | 8610005
- 18296 | 8981005 | table_with_index_rep_identity | -2147483648 | -1 | localhost | 57637 | 8610006
- 18296 | 8981006 | table_with_index_rep_identity | 0 | 2147483647 | localhost | 57638 | 8610007
+ 8981000 | sensors | -2147483648 | -1 | localhost | 57637 | 8610000
+ 8981001 | sensors | 0 | 2147483647 | localhost | 57638 | 8610001
+ 8981003 | colocated_dist_table | -2147483648 | -1 | localhost | 57637 | 8610004
+ 8981004 | colocated_dist_table | 0 | 2147483647 | localhost | 57638 | 8610005
+ 8981005 | table_with_index_rep_identity | -2147483648 | -1 | localhost | 57637 | 8610006
+ 8981006 | table_with_index_rep_identity | 0 | 2147483647 | localhost | 57638 | 8610007
(6 rows)
\c - - - :worker_1_port
@@ -224,30 +224,30 @@ SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localho
-- END : Move a shard post split.
-- BEGIN : Display current state.
-- TODO(niupre): Can we refactor this to be a function?
-SELECT cls.relnamespace, shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
- relnamespace | shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport | placementid
+ shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport | placementid
---------------------------------------------------------------------
- 18296 | 8981007 | sensors | -2147483648 | -1073741824 | localhost | 57638 | 135
- 18296 | 8981008 | sensors | -1073741823 | -1 | localhost | 57638 | 121
- 18296 | 8981013 | sensors | 0 | 536870911 | localhost | 57637 | 126
- 18296 | 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 | 127
- 18296 | 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 | 128
- 18296 | 8981009 | colocated_dist_table | -2147483648 | -1073741824 | localhost | 57638 | 136
- 18296 | 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 | 123
- 18296 | 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 | 129
- 18296 | 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 | 130
- 18296 | 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 | 131
- 18296 | 8981011 | table_with_index_rep_identity | -2147483648 | -1073741824 | localhost | 57638 | 137
- 18296 | 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 | 125
- 18296 | 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 | 132
- 18296 | 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 | 133
- 18296 | 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 | 134
+ 8981007 | sensors | -2147483648 | -1073741824 | localhost | 57638 | 136
+ 8981008 | sensors | -1073741823 | -1 | localhost | 57638 | 122
+ 8981013 | sensors | 0 | 536870911 | localhost | 57637 | 127
+ 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 | 128
+ 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 | 129
+ 8981009 | colocated_dist_table | -2147483648 | -1073741824 | localhost | 57638 | 137
+ 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 | 124
+ 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 | 130
+ 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 | 131
+ 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 | 132
+ 8981011 | table_with_index_rep_identity | -2147483648 | -1073741824 | localhost | 57638 | 138
+ 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 | 126
+ 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 | 133
+ 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 | 134
+ 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 | 135
(15 rows)
\c - - - :worker_1_port
@@ -377,33 +377,33 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
(1 row)
SET search_path TO "citus_split_test_schema";
-SELECT cls.relnamespace, shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
- relnamespace | shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport | placementid
+ shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport | placementid
---------------------------------------------------------------------
- 18296 | 8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637 | 138
- 18296 | 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638 | 139
- 18296 | 8981008 | sensors | -1073741823 | -1 | localhost | 57638 | 121
- 18296 | 8981013 | sensors | 0 | 536870911 | localhost | 57637 | 126
- 18296 | 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 | 127
- 18296 | 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 | 128
- 18296 | 8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637 | 140
- 18296 | 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638 | 141
- 18296 | 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 | 123
- 18296 | 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 | 129
- 18296 | 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 | 130
- 18296 | 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 | 131
- 18296 | 8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637 | 142
- 18296 | 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638 | 143
- 18296 | 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 | 125
- 18296 | 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 | 132
- 18296 | 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 | 133
- 18296 | 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 | 134
+ 8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637 | 139
+ 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638 | 140
+ 8981008 | sensors | -1073741823 | -1 | localhost | 57638 | 122
+ 8981013 | sensors | 0 | 536870911 | localhost | 57637 | 127
+ 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 | 128
+ 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 | 129
+ 8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637 | 141
+ 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638 | 142
+ 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 | 124
+ 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 | 130
+ 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 | 131
+ 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 | 132
+ 8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637 | 143
+ 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638 | 144
+ 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 | 126
+ 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 | 133
+ 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 | 134
+ 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 | 135
(18 rows)
-- END: Split second time on another schema

@@ -0,0 +1,83 @@
CREATE SCHEMA worker_split_copy_test;
SET search_path TO worker_split_copy_test;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81070000;
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table"(id int primary key, value char);
SELECT create_distributed_table('"test !/ \n _""dist_123_table"', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO "test !/ \n _""dist_123_table" (id, value) (SELECT g.id, 'N' FROM generate_series(1, 100) AS g(id));
-- END: Create distributed table and insert data.
-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy.
\c - - - :worker_1_port
CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070015"(id int primary key, value char);
CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070016"(id int primary key, value char);
-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy.
-- BEGIN: List row count for source shard and targets shard in Worker1.
\c - - - :worker_1_port
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015";
count
---------------------------------------------------------------------
0
(1 row)
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
count
---------------------------------------------------------------------
0
(1 row)
-- END: List row count for source shard and targets shard in Worker1.
-- BEGIN: Set worker_1_node and worker_2_node
\c - - - :worker_1_port
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-- END: Set worker_1_node and worker_2_node
-- BEGIN: Trigger 2-way local shard split copy.
-- Ensure we will perform text copy.
SET citus.enable_binary_protocol = false;
SELECT * from worker_split_copy(
81070000, -- source shard id to copy
ARRAY[
-- split copy info for split children 1
ROW(81070015, -- destination shard id
-2147483648, -- split range begin
1073741823, --split range end
:worker_1_node)::citus.split_copy_info,
-- split copy info for split children 2
ROW(81070016, --destination shard id
1073741824, --split range begin
2147483647, --split range end
:worker_1_node)::citus.split_copy_info
]
);
worker_split_copy
---------------------------------------------------------------------
(1 row)
-- END: Trigger 2-way local shard split copy.
-- BEGIN: List updated row count for local targets shard.
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015";
count
---------------------------------------------------------------------
72
(1 row)
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
count
---------------------------------------------------------------------
28
(1 row)
-- END: List updated row count for local targets shard.
-- BEGIN: CLEANUP.
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA worker_split_copy_test CASCADE;
-- END: CLEANUP.
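For context on the 72 and 28 row counts in the expected output above: worker_split_copy() routes each source row to the child shard whose [min, max] range contains the hash of its distribution column, and the split point 1073741823 leaves roughly three quarters of the 32-bit hash space to the first child, so a 72/28 split of the 100 inserted rows is the expected shape. A hedged sketch of how that routing could be inspected by hand on worker 1, assuming the worker_hash() helper UDF and the conventional _81070000 suffix for the source shard:

-- Not part of the test: show which child each source row maps to under the two split ranges.
SELECT id,
       worker_hash(id) AS hash_value,
       CASE WHEN worker_hash(id) <= 1073741823 THEN 81070015 ELSE 81070016 END AS destination_shard
FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070000"
ORDER BY id;

Counting the rows per destination_shard should reproduce the 72/28 totals reported by the test.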

@@ -7,6 +7,7 @@ test: tablespace
# Helpers for foreign key catalogs.
test: foreign_key_to_reference_table
# Split tests go here.
+test: worker_split_copy_test
test: worker_split_binary_copy_test
test: worker_split_text_copy_test
test: citus_split_shard_by_split_points_negative

@@ -64,7 +64,7 @@ INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FR
-- BEGIN : Display current state.
-- TODO(niupre): Can we refactor this to be a function?
-SELECT cls.relnamespace, shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
@@ -145,7 +145,7 @@ SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localho
-- BEGIN : Display current state.
-- TODO(niupre): Can we refactor this to be a function?
-SELECT cls.relnamespace, shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
@@ -210,7 +210,7 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
'blocking');
SET search_path TO "citus_split_test_schema";
-SELECT cls.relnamespace, shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport, placementid
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid

@@ -0,0 +1,63 @@
CREATE SCHEMA worker_split_copy_test;
SET search_path TO worker_split_copy_test;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81070000;
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table"(id int primary key, value char);
SELECT create_distributed_table('"test !/ \n _""dist_123_table"', 'id');
INSERT INTO "test !/ \n _""dist_123_table" (id, value) (SELECT g.id, 'N' FROM generate_series(1, 100) AS g(id));
-- END: Create distributed table and insert data.
-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy.
\c - - - :worker_1_port
CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070015"(id int primary key, value char);
CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070016"(id int primary key, value char);
-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy.
-- BEGIN: List row count for source shard and targets shard in Worker1.
\c - - - :worker_1_port
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015";
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
-- END: List row count for source shard and targets shard in Worker1.
-- BEGIN: Set worker_1_node and worker_2_node
\c - - - :worker_1_port
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-- END: Set worker_1_node and worker_2_node
-- BEGIN: Trigger 2-way local shard split copy.
-- Ensure we will perform text copy.
SET citus.enable_binary_protocol = false;
SELECT * from worker_split_copy(
81070000, -- source shard id to copy
ARRAY[
-- split copy info for split children 1
ROW(81070015, -- destination shard id
-2147483648, -- split range begin
1073741823, --split range end
:worker_1_node)::citus.split_copy_info,
-- split copy info for split children 2
ROW(81070016, --destination shard id
1073741824, --split range begin
2147483647, --split range end
:worker_1_node)::citus.split_copy_info
]
);
-- END: Trigger 2-way local shard split copy.
-- BEGIN: List updated row count for local targets shard.
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015";
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
-- END: List updated row count for local targets shard.
-- BEGIN: CLEANUP.
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA worker_split_copy_test CASCADE;
-- END: CLEANUP.