mirror of https://github.com/citusdata/citus.git
Renamed UDF to worker_split_shard_replication_setup
parent b425573b08
commit 672d198019
@@ -19,7 +19,7 @@
 #include "utils/lsyscache.h"
 
 /* declarations for dynamic loading */
-PG_FUNCTION_INFO_V1(split_shard_replication_setup);
+PG_FUNCTION_INFO_V1(worker_split_shard_replication_setup);
 
 static HTAB *ShardInfoHashMap = NULL;
 
@@ -53,8 +53,7 @@ static ShardSplitInfo * CreateShardSplitInfo(uint64 sourceShardIdToSplit,
 static void AddShardSplitInfoEntryForNodeInMap(ShardSplitInfo *shardSplitInfo);
 static void PopulateShardSplitInfoInSM(ShardSplitInfo *shardSplitInfoArray,
                                        HTAB *shardInfoHashMap,
-                                       dsm_handle dsmHandle,
-                                       int shardSplitInfoCount);
+                                       dsm_handle dsmHandle);
 
 static void SetupHashMapForShardInfo(void);
 static uint32 NodeShardMappingHash(const void *key, Size keysize);
@@ -62,7 +61,7 @@ static int NodeShardMappingHashCompare(const void *left, const void *right, Size
 
 
 /*
- * split_shard_replication_setup UDF creates in-memory data structures
+ * worker_split_shard_replication_setup UDF creates in-memory data structures
  * to store the meta information about the shard undergoing split and new split
  * children along with their placements required during the catch up phase
  * of logical replication.
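The comment above describes the UDF's contract: it takes a two-dimensional bigint array describing the split and returns a handle to the shared memory it populates. A minimal invocation sketch, modeled on the regression tests further down in this commit (the column meaning — source shard id, split child shard id, hash-range min, hash-range max, target node id — is inferred from those tests, and the node id below is a hypothetical value):

    -- Sketch only: run on a worker node with the Citus extension loaded.
    -- Columns per row (inferred from the tests): source shard, split child shard,
    -- hash-range min, hash-range max, target node id (18 is a made-up node id).
    SELECT worker_split_shard_replication_setup(
        ARRAY[
            ARRAY[1, 2, -2147483648, -1,         18],
            ARRAY[1, 3,           0, 2147483647, 18]
        ]::bigint[]) AS shared_memory_handle;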
@@ -102,7 +101,7 @@ static int NodeShardMappingHashCompare(const void *left, const void *right, Size
  * responsible.
  */
 Datum
-split_shard_replication_setup(PG_FUNCTION_ARGS)
+worker_split_shard_replication_setup(PG_FUNCTION_ARGS)
 {
 	ArrayType *shardInfoArrayObject = PG_GETARG_ARRAYTYPE_P(0);
 	int shardInfoArrayLength = ARR_DIMS(shardInfoArrayObject)[0];
@@ -145,8 +144,7 @@ split_shard_replication_setup(PG_FUNCTION_ARGS)
 
 	PopulateShardSplitInfoInSM(splitShardInfoSMArray,
 	                           ShardInfoHashMap,
-	                           dsmHandle,
-	                           shardSplitInfoCount);
+	                           dsmHandle);
 
 	return dsmHandle;
 }
@@ -429,8 +427,7 @@ AddShardSplitInfoEntryForNodeInMap(ShardSplitInfo *shardSplitInfo)
 static void
 PopulateShardSplitInfoInSM(ShardSplitInfo *shardSplitInfoArray,
                            HTAB *shardInfoHashMap,
-                           dsm_handle dsmHandle,
-                           int shardSplitInfoCount)
+                           dsm_handle dsmHandle)
 {
 	HASH_SEQ_STATUS status;
 	hash_seq_init(&status, shardInfoHashMap);
@@ -1,7 +1,7 @@
-CREATE OR REPLACE FUNCTION pg_catalog.split_shard_replication_setup(
+CREATE OR REPLACE FUNCTION pg_catalog.worker_split_shard_replication_setup(
     shardInfo bigint[][])
 RETURNS bigint
 LANGUAGE C STRICT
-AS 'MODULE_PATHNAME', $$split_shard_replication_setup$$;
-COMMENT ON FUNCTION pg_catalog.split_shard_replication_setup(shardInfo bigint[][])
+AS 'MODULE_PATHNAME', $$worker_split_shard_replication_setup$$;
+COMMENT ON FUNCTION pg_catalog.worker_split_shard_replication_setup(shardInfo bigint[][])
     IS 'Replication setup for splitting a shard'
@@ -1,7 +1,7 @@
-CREATE OR REPLACE FUNCTION pg_catalog.split_shard_replication_setup(
+CREATE OR REPLACE FUNCTION pg_catalog.worker_split_shard_replication_setup(
     shardInfo bigint[][])
 RETURNS bigint
 LANGUAGE C STRICT
-AS 'MODULE_PATHNAME', $$split_shard_replication_setup$$;
-COMMENT ON FUNCTION pg_catalog.split_shard_replication_setup(shardInfo bigint[][])
+AS 'MODULE_PATHNAME', $$worker_split_shard_replication_setup$$;
+COMMENT ON FUNCTION pg_catalog.worker_split_shard_replication_setup(shardInfo bigint[][])
     IS 'Replication setup for splitting a shard'
@@ -2,7 +2,7 @@
 *
 * shardsplit_shared_memory.h
 *    API's for creating and accessing shared memory segments to store
-*    shard split information. 'split_shard_replication_setup' UDF creates the
+*    shard split information. 'worker_split_shard_replication_setup' UDF creates the
 *    shared memory and populates the contents. WAL sender processes are consumer
 *    of split information for appropriate tuple routing.
 *
@@ -6,7 +6,7 @@ DECLARE
     memoryId bigint := 0;
     memoryIdText text;
 begin
-	SELECT * into memoryId from split_shard_replication_setup(ARRAY[ARRAY[1,2,-2147483648,-1, targetNode1], ARRAY[1,3,0,2147483647,targetNode2]]);
+	SELECT * into memoryId from worker_split_shard_replication_setup(ARRAY[ARRAY[1,2,-2147483648,-1, targetNode1], ARRAY[1,3,0,2147483647,targetNode2]]);
     SELECT FORMAT('%s', memoryId) into memoryIdText;
     return memoryIdText;
 end
@@ -42,7 +42,7 @@ DECLARE
     memoryId bigint := 0;
     memoryIdText text;
 begin
-	SELECT * into memoryId from split_shard_replication_setup(
+	SELECT * into memoryId from worker_split_shard_replication_setup(
     ARRAY[
           ARRAY[4, 5, -2147483648,-1, targetNode1],
           ARRAY[4, 6, 0 ,2147483647, targetNode2],
@@ -242,6 +242,7 @@ ORDER BY 1;
 function worker_partitioned_table_size(regclass)
 function worker_record_sequence_dependency(regclass,regclass,name)
 function worker_save_query_explain_analyze(text,jsonb)
+function worker_split_shard_replication_setup(bigint[])
 schema citus
 schema citus_internal
 schema columnar
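The upgrade test above now lists the renamed UDF. A quick sanity check after an extension upgrade — not part of this commit, just a hedged example against the standard catalogs — is to confirm that only the new name is present:

    -- Hypothetical verification query: after ALTER EXTENSION citus UPDATE,
    -- only worker_split_shard_replication_setup should remain in pg_proc.
    SELECT proname
    FROM pg_proc
    WHERE proname IN ('split_shard_replication_setup',
                      'worker_split_shard_replication_setup');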
@@ -7,7 +7,7 @@ DECLARE
     memoryId bigint := 0;
     memoryIdText text;
 begin
-	SELECT * into memoryId from split_shard_replication_setup(ARRAY[ARRAY[1,2,-2147483648,-1, targetNode1], ARRAY[1,3,0,2147483647,targetNode2]]);
+	SELECT * into memoryId from worker_split_shard_replication_setup(ARRAY[ARRAY[1,2,-2147483648,-1, targetNode1], ARRAY[1,3,0,2147483647,targetNode2]]);
     SELECT FORMAT('%s', memoryId) into memoryIdText;
     return memoryIdText;
 end
@@ -45,7 +45,7 @@ DECLARE
     memoryId bigint := 0;
     memoryIdText text;
 begin
-	SELECT * into memoryId from split_shard_replication_setup(
+	SELECT * into memoryId from worker_split_shard_replication_setup(
     ARRAY[
           ARRAY[4, 5, -2147483648,-1, targetNode1],
           ARRAY[4, 6, 0 ,2147483647, targetNode2],