diff --git a/src/backend/distributed/operations/split_shard_replication_setup.c b/src/backend/distributed/operations/split_shard_replication_setup.c
index f2841ce95..92dee0202 100644
--- a/src/backend/distributed/operations/split_shard_replication_setup.c
+++ b/src/backend/distributed/operations/split_shard_replication_setup.c
@@ -89,7 +89,7 @@ static int NodeShardMappingHashCompare(const void *left, const void *right, Size
  * There is a 1-1 mapping between a table owner and a replication slot. One replication
  * slot takes care of replicating changes for all shards belonging to the same owner on a particular node.
  *
- * During the replication phase, 'decoding_plugin_for_shard_split' will attach to the shared memory
+ * During the replication phase, WAL senders will attach to the shared memory
  * populated by current UDF. It routes the tuple from the source shard to the appropriate destination
  * shard for which the respective slot is responsible.
  */
diff --git a/src/test/regress/expected/split_shard_replication_setup.out b/src/test/regress/expected/split_shard_replication_setup.out
index 8a229ced9..41d03cb3a 100644
--- a/src/test/regress/expected/split_shard_replication_setup.out
+++ b/src/test/regress/expected/split_shard_replication_setup.out
@@ -27,7 +27,7 @@ SELECT create_distributed_table('table_to_split','id');
 -- ROW(1, 3 , 0 , 2147483647, 18 )
 -- ]
 -- );
--- 5. Create Replication slot with 'decoding_plugin_for_shard_split'
+-- 5. Create Replication slot with 'citus'
 -- 6. Setup Pub/Sub
 -- 7. Insert into table_to_split_1 at source worker1
 -- 8. Expect the results in either table_to_split_2 or table_to_split_3 at worker2
diff --git a/src/test/regress/sql/split_shard_replication_setup.sql b/src/test/regress/sql/split_shard_replication_setup.sql
index 52f0c290c..48cec6ce0 100644
--- a/src/test/regress/sql/split_shard_replication_setup.sql
+++ b/src/test/regress/sql/split_shard_replication_setup.sql
@@ -25,7 +25,7 @@ SELECT create_distributed_table('table_to_split','id');
 -- ROW(1, 3 , 0 , 2147483647, 18 )
 -- ]
 -- );
--- 5. Create Replication slot with 'decoding_plugin_for_shard_split'
+-- 5. Create Replication slot with 'citus'
 -- 6. Setup Pub/Sub
 -- 7. Insert into table_to_split_1 at source worker1
 -- 8. Expect the results in either table_to_split_2 or table_to_split_3 at worker2