mirror of https://github.com/citusdata/citus.git
Merge 6754a84e60 into fa5b8fb39f
commit 0e3f3d5b7a
@@ -9,7 +9,7 @@ EXTVERSIONS = 5.0 5.0-1 5.0-2 \
 	5.1-1 5.1-2 5.1-3 5.1-4 5.1-5 5.1-6 5.1-7 5.1-8 \
 	5.2-1 5.2-2 5.2-3 5.2-4 \
 	6.0-1 6.0-2 6.0-3 6.0-4 6.0-5 6.0-6 6.0-7 6.0-8 6.0-9 6.0-10 6.0-11 6.0-12 6.0-13 6.0-14 6.0-15 6.0-16 6.0-17 6.0-18 \
-	6.1-1 6.1-2 6.1-3 6.1-4 6.1-5 6.1-6 6.1-7 6.1-8 6.1-9 6.1-10 6.1-11 6.1-12 6.1-13 6.1-14 6.1-15 6.1-16 6.1-17
+	6.1-1 6.1-2 6.1-3 6.1-4 6.1-5 6.1-6 6.1-7 6.1-8 6.1-9 6.1-10 6.1-11 6.1-12 6.1-13 6.1-14 6.1-15 6.1-16 6.1-17 6.1-18
 
 # All citus--*.sql files in the source directory
 DATA = $(patsubst $(citus_abs_srcdir)/%.sql,%.sql,$(wildcard $(citus_abs_srcdir)/$(EXTENSION)--*--*.sql))
@@ -129,6 +129,8 @@ $(EXTENSION)--6.1-16.sql: $(EXTENSION)--6.1-15.sql $(EXTENSION)--6.1-15--6.1-16.
 	cat $^ > $@
 $(EXTENSION)--6.1-17.sql: $(EXTENSION)--6.1-16.sql $(EXTENSION)--6.1-16--6.1-17.sql
 	cat $^ > $@
+$(EXTENSION)--6.1-18.sql: $(EXTENSION)--6.1-17.sql $(EXTENSION)--6.1-17--6.1-18.sql
+	cat $^ > $@
 
 NO_PGXS = 1
@@ -0,0 +1,24 @@
+/* citus--6.1-17--6.1-18.sql */
+
+SET search_path = 'pg_catalog';
+
+ALTER TABLE pg_dist_node ADD COLUMN noderole "char" NOT NULL DEFAULT 'p';
+
+DROP FUNCTION IF EXISTS master_add_node(text, integer);
+
+CREATE FUNCTION master_add_node(nodename text,
+                                nodeport integer,
+                                OUT nodeid integer,
+                                OUT groupid integer,
+                                OUT nodename text,
+                                OUT nodeport integer,
+                                OUT noderack text,
+                                OUT hasmetadata boolean,
+                                OUT noderole "char")
+    RETURNS record
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$master_add_node$$;
+COMMENT ON FUNCTION master_add_node(nodename text, nodeport integer)
+    IS 'add node to the cluster';
+
+RESET search_path;
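For orientation, here is a minimal, hedged SQL sketch of how the upgrade script above surfaces on an installation that is already running 6.1-17; the localhost/5763x values are the regression-test ports used later in this diff and are purely illustrative:

    -- move an installed extension to the new schema version
    ALTER EXTENSION citus UPDATE TO '6.1-18';

    -- existing rows pick up the column default, i.e. every known node is a primary ('p')
    SELECT nodeid, groupid, nodename, nodeport, noderack, hasmetadata, noderole FROM pg_dist_node;

    -- the redefined UDF now also reports the role of the node it just added
    SELECT * FROM master_add_node('localhost', 57638);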
@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '6.1-17'
+default_version = '6.1-18'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog
@@ -399,7 +399,7 @@ master_get_local_first_candidate_nodes(PG_FUNCTION_ARGS)
         functionContext->max_calls = ShardReplicationFactor;
 
         /* if enough live nodes, return an extra candidate node as backup */
-        liveNodeCount = WorkerGetLiveNodeCount();
+        liveNodeCount = WorkerGetLiveGroupCount();
         if (liveNodeCount > ShardReplicationFactor)
         {
             functionContext->max_calls = ShardReplicationFactor + 1;
@@ -141,7 +141,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
 
     /* if enough live nodes, add an extra candidate node as backup */
     attemptableNodeCount = ShardReplicationFactor;
-    liveNodeCount = WorkerGetLiveNodeCount();
+    liveNodeCount = WorkerGetLiveGroupCount();
     if (liveNodeCount > ShardReplicationFactor)
     {
         attemptableNodeCount = ShardReplicationFactor + 1;
@@ -18,6 +18,7 @@
 #include "distributed/worker_manager.h"
 #include "distributed/metadata_cache.h"
 #include "distributed/multi_client_executor.h"
+#include "distributed/pg_dist_node.h"
 #include "libpq/hba.h"
 #include "libpq/ip.h"
 #include "libpq/libpq-be.h"
@@ -37,11 +38,13 @@ int MaxWorkerNodesTracked = 2048;    /* determines worker node hash table size *
 
 /* Local functions forward declarations */
 static char * ClientHostAddress(StringInfo remoteHostStringInfo);
-static WorkerNode * FindRandomNodeNotInList(HTAB *WorkerNodesHash,
-                                            List *currentNodeList);
+static WorkerNode * FindRandomNodeFromList(List *workerNodeList);
+static WorkerNode * WorkerGetNodeWithName(const char *hostname);
 static bool OddNumber(uint32 number);
 static bool ListMember(List *currentList, WorkerNode *workerNode);
 
+static List * PrimaryNodesNotInList(List *currentList);
+
 
 /* ------------------------------------------------------------
  * Worker node selection functions follow
@@ -50,8 +53,8 @@ static bool ListMember(List *currentList, WorkerNode *workerNode);
 
 /*
  * WorkerGetRandomCandidateNode takes in a list of worker nodes, and then allocates
- * a new worker node. The allocation is performed by randomly picking a worker node
- * which is not in currentNodeList.
+ * a new worker node. The allocation is performed by randomly picking a primary worker
+ * node which is not in currentNodeList.
  *
  * Note that the function returns null if the worker membership list does not
  * contain enough nodes to allocate a new worker node.
@@ -64,16 +67,15 @@ WorkerGetRandomCandidateNode(List *currentNodeList)
     uint32 tryCount = WORKER_RACK_TRIES;
     uint32 tryIndex = 0;
 
-    HTAB *workerNodeHash = GetWorkerNodeHash();
+    uint32 currentNodeCount = list_length(currentNodeList);
+    List *candidateWorkerNodeList = PrimaryNodesNotInList(currentNodeList);
 
     /*
     * We check if the shard has already been placed on all nodes known to us.
     * This check is rather defensive, and has the drawback of performing a full
-    * scan over the worker node hash for determining the number of live nodes.
+    * scan over the worker node hash.
     */
-    uint32 currentNodeCount = list_length(currentNodeList);
-    uint32 liveNodeCount = WorkerGetLiveNodeCount();
-    if (currentNodeCount >= liveNodeCount)
+    if (list_length(candidateWorkerNodeList) == 0)
     {
         return NULL;
     }
@@ -81,7 +83,7 @@ WorkerGetRandomCandidateNode(List *currentNodeList)
     /* if current node list is empty, randomly pick one node and return */
     if (currentNodeCount == 0)
     {
-        workerNode = FindRandomNodeNotInList(workerNodeHash, NIL);
+        workerNode = FindRandomNodeFromList(candidateWorkerNodeList);
         return workerNode;
     }
 
@@ -111,7 +113,7 @@ WorkerGetRandomCandidateNode(List *currentNodeList)
         char *workerRack = NULL;
         bool sameRack = false;
 
-        workerNode = FindRandomNodeNotInList(workerNodeHash, currentNodeList);
+        workerNode = FindRandomNodeFromList(candidateWorkerNodeList);
         workerRack = workerNode->workerRack;
 
         sameRack = (strncmp(workerRack, firstRack, WORKER_LENGTH) == 0);
@@ -156,7 +158,7 @@ WorkerGetRoundRobinCandidateNode(List *workerNodeList, uint64 shardId,
 
 /*
  * WorkerGetLocalFirstCandidateNode takes in a list of worker nodes, and then
- * allocates a new worker node. The allocation is performed according to the
+ * allocates a new primary worker node. The allocation is performed according to the
  * following policy: if the list is empty, the node where the caller is connecting
  * from is allocated; if the list is not empty, a node is allocated according
  * to random policy.
@@ -269,10 +271,10 @@ ClientHostAddress(StringInfo clientHostStringInfo)
 
 
 /*
- * WorkerGetNodeWithName finds and returns a node from the membership list that
+ * WorkerGetNodeWithName finds and returns a primary node from the membership list that
  * has the given hostname. The function returns null if no such node exists.
  */
-WorkerNode *
+static WorkerNode *
 WorkerGetNodeWithName(const char *hostname)
 {
     WorkerNode *workerNode = NULL;
@@ -284,7 +286,7 @@ WorkerGetNodeWithName(const char *hostname)
     while ((workerNode = hash_seq_search(&status)) != NULL)
     {
         int nameCompare = strncmp(workerNode->workerName, hostname, WORKER_LENGTH);
-        if (nameCompare == 0)
+        if (nameCompare == 0 && workerNode->nodeRole == NODE_ROLE_PRIMARY)
         {
             /* we need to terminate the scan since we break */
             hash_seq_term(&status);
@@ -297,13 +299,26 @@ WorkerGetNodeWithName(const char *hostname)
 
 
 /*
- * WorkerGetLiveNodeCount returns the number of live nodes in the cluster.
- * */
+ * WorkerGetLiveGroupCount returns the number of groups which have a primary capable of
+ * accepting writes.
+ */
 uint32
-WorkerGetLiveNodeCount(void)
+WorkerGetLiveGroupCount(void)
 {
     HTAB *workerNodeHash = GetWorkerNodeHash();
-    uint32 liveWorkerCount = hash_get_num_entries(workerNodeHash);
+    uint32 liveWorkerCount = 0;
+    HASH_SEQ_STATUS status;
+    WorkerNode *workerNode = NULL;
+
+    hash_seq_init(&status, workerNodeHash);
+
+    while ((workerNode = hash_seq_search(&status)) != NULL)
+    {
+        if (workerNode->nodeRole == NODE_ROLE_PRIMARY)
+        {
+            liveWorkerCount++;
+        }
+    }
 
     return liveWorkerCount;
 }
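In effect, the renamed function counts only nodes whose role is primary instead of every entry in the worker node hash. A rough SQL analogue of the new semantics, assuming (as master_add_node currently guarantees) one writable primary per group:

    -- approximate catalog-level equivalent of WorkerGetLiveGroupCount()
    SELECT count(*) FROM pg_dist_node WHERE noderole = 'p';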
@@ -312,6 +327,8 @@ WorkerGetLiveNodeCount(void)
 /*
  * WorkerNodeList iterates over the hash table that includes the worker nodes, and adds
  * them to a list which is returned.
+ *
+ * It only returns nodes which are primaries.
  */
 List *
 WorkerNodeList(void)
@@ -325,7 +342,14 @@ WorkerNodeList(void)
 
     while ((workerNode = hash_seq_search(&status)) != NULL)
     {
-        WorkerNode *workerNodeCopy = palloc0(sizeof(WorkerNode));
+        WorkerNode *workerNodeCopy;
+
+        if (workerNode->nodeRole != NODE_ROLE_PRIMARY)
+        {
+            continue;
+        }
+
+        workerNodeCopy = palloc0(sizeof(WorkerNode));
         memcpy(workerNodeCopy, workerNode, sizeof(WorkerNode));
         workerNodeList = lappend(workerNodeList, workerNodeCopy);
     }
@@ -335,70 +359,50 @@ WorkerNodeList(void)
 
 
 /*
- * FindRandomNodeNotInList finds a random node from the shared hash that is not
- * a member of the current node list. The caller is responsible for making the
- * necessary node count checks to ensure that such a node exists.
- *
- * Note that this function has a selection bias towards nodes whose positions in
- * the shared hash are sequentially adjacent to the positions of nodes that are
- * in the current node list. This bias follows from our decision to first pick a
- * random node in the hash, and if that node is a member of the current list, to
- * simply iterate to the next node in the hash. Overall, this approach trades in
- * some selection bias for simplicity in design and for bounded execution time.
+ * PrimaryNodesNotInList scans through the worker node hash and returns a list
+ * of all primary nodes which are not in currentList.
  */
-static WorkerNode *
-FindRandomNodeNotInList(HTAB *WorkerNodesHash, List *currentNodeList)
+static List *
+PrimaryNodesNotInList(List *currentList)
 {
+    List *workerNodeList = NIL;
+    HTAB *workerNodeHash = GetWorkerNodeHash();
     WorkerNode *workerNode = NULL;
     HASH_SEQ_STATUS status;
-    uint32 workerNodeCount = 0;
-    uint32 currentNodeCount PG_USED_FOR_ASSERTS_ONLY = 0;
-    bool lookForWorkerNode = true;
-    uint32 workerPosition = 0;
-    uint32 workerIndex = 0;
 
-    workerNodeCount = hash_get_num_entries(WorkerNodesHash);
-    currentNodeCount = list_length(currentNodeList);
-    Assert(workerNodeCount > currentNodeCount);
-
-    /*
-     * We determine a random position within the worker hash between [1, N],
-     * assuming that the number of elements in the hash is N. We then get to
-     * this random position by iterating over the worker hash. Please note that
-     * the random seed has already been set by the postmaster when starting up.
-     */
-    workerPosition = (random() % workerNodeCount) + 1;
-    hash_seq_init(&status, WorkerNodesHash);
+    hash_seq_init(&status, workerNodeHash);
 
-    for (workerIndex = 0; workerIndex < workerPosition; workerIndex++)
+    /* this is O(n*m) but there usually aren't many nodes in currentList */
+    while ((workerNode = hash_seq_search(&status)) != NULL)
     {
-        workerNode = (WorkerNode *) hash_seq_search(&status);
-    }
+        WorkerNode *workerNodeCopy;
 
-    while (lookForWorkerNode)
-    {
-        bool listMember = ListMember(currentNodeList, workerNode);
-
-        if (!listMember)
+        if ((workerNode->nodeRole != NODE_ROLE_PRIMARY) ||
+            ListMember(currentList, workerNode))
         {
-            lookForWorkerNode = false;
+            continue;
         }
-        else
-        {
-            /* iterate to the next worker node in the hash */
-            workerNode = (WorkerNode *) hash_seq_search(&status);
 
-            /* reached end of hash; start from the beginning */
-            if (workerNode == NULL)
-            {
-                hash_seq_init(&status, WorkerNodesHash);
-                workerNode = (WorkerNode *) hash_seq_search(&status);
-            }
-        }
+        workerNodeCopy = palloc0(sizeof(WorkerNode));
+        memcpy(workerNodeCopy, workerNode, sizeof(WorkerNode));
+        workerNodeList = lappend(workerNodeList, workerNodeCopy);
     }
 
-    /* we stopped scanning before completion; therefore clean up scan */
-    hash_seq_term(&status);
+    return workerNodeList;
+}
+
+
+/* FindRandomNodeFromList picks a random node from the list provided to it */
+static WorkerNode *
+FindRandomNodeFromList(List *candidateWorkerNodeList)
+{
+    uint32 candidateNodeCount = list_length(candidateWorkerNodeList);
+
+    /* nb, the random seed has already been set by the postmaster when starting up. */
+    uint32 workerPosition = (random() % candidateNodeCount);
+
+    WorkerNode *workerNode = (WorkerNode *) list_nth(candidateWorkerNodeList,
+                                                     workerPosition);
 
     return workerNode;
 }
@@ -402,7 +402,7 @@ NodeListInsertCommand(List *workerNodeList)
     /* generate the query without any values yet */
     appendStringInfo(nodeListInsertCommand,
                      "INSERT INTO pg_dist_node "
-                     "(nodeid, groupid, nodename, nodeport, noderack, hasmetadata) "
+                     "(nodeid, groupid, nodename, nodeport, noderack, hasmetadata, noderole) "
                      "VALUES ");
 
     /* iterate over the worker nodes, add the values */
|
||||||
char *hasMetadaString = workerNode->hasMetadata ? "TRUE" : "FALSE";
|
char *hasMetadaString = workerNode->hasMetadata ? "TRUE" : "FALSE";
|
||||||
|
|
||||||
appendStringInfo(nodeListInsertCommand,
|
appendStringInfo(nodeListInsertCommand,
|
||||||
"(%d, %d, %s, %d, %s, %s)",
|
"(%d, %d, %s, %d, %s, %s, '%c')",
|
||||||
workerNode->nodeId,
|
workerNode->nodeId,
|
||||||
workerNode->groupId,
|
workerNode->groupId,
|
||||||
quote_literal_cstr(workerNode->workerName),
|
quote_literal_cstr(workerNode->workerName),
|
||||||
workerNode->workerPort,
|
workerNode->workerPort,
|
||||||
quote_literal_cstr(workerNode->workerRack),
|
quote_literal_cstr(workerNode->workerRack),
|
||||||
hasMetadaString);
|
hasMetadaString,
|
||||||
|
workerNode->nodeRole);
|
||||||
|
|
||||||
processedWorkerNodeCount++;
|
processedWorkerNodeCount++;
|
||||||
if (processedWorkerNodeCount != workerCount)
|
if (processedWorkerNodeCount != workerCount)
|
||||||
|
|
|
@@ -1834,7 +1834,7 @@ BuildMapMergeJob(Query *jobQuery, List *dependedJobList, Var *partitionKey,
 static uint32
 HashPartitionCount(void)
 {
-    uint32 nodeCount = WorkerGetLiveNodeCount();
+    uint32 nodeCount = WorkerGetLiveGroupCount();
     double maxReduceTasksPerNode = MaxRunningTasksPerNode / 2.0;
 
     uint32 partitionCount = (uint32) rint(nodeCount * maxReduceTasksPerNode);
@@ -1625,6 +1625,7 @@ InitializeWorkerNodeCache(void)
         workerNode->nodeId = currentNode->nodeId;
         strlcpy(workerNode->workerRack, currentNode->workerRack, WORKER_LENGTH);
         workerNode->hasMetadata = currentNode->hasMetadata;
+        workerNode->nodeRole = currentNode->nodeRole;
 
         if (handleFound)
         {
@@ -53,13 +53,14 @@ int GroupSize = 1;
 /* local function forward declarations */
 static void RemoveNodeFromCluster(char *nodeName, int32 nodePort, bool forceRemove);
 static Datum AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId,
-                             char *nodeRack, bool hasMetadata, bool *nodeAlreadyExists);
+                             char *nodeRack, bool hasMetadata, char noderole,
+                             bool *nodeAlreadyExists);
 static Datum GenerateNodeTuple(WorkerNode *workerNode);
 static int32 GetNextGroupId(void);
 static uint32 GetMaxGroupId(void);
 static int GetNextNodeId(void);
 static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport, uint32 groupId,
-                          char *nodeRack, bool hasMetadata);
+                          char *nodeRack, bool hasMetadata, char noderole);
 static void DeleteNodeRow(char *nodename, int32 nodeport);
 static List * ParseWorkerNodeFileAndRename(void);
 static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple);
@@ -88,7 +89,8 @@ master_add_node(PG_FUNCTION_ARGS)
     bool nodeAlreadyExists = false;
 
     Datum returnData = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack,
-                                       hasMetadata, &nodeAlreadyExists);
+                                       hasMetadata, NODE_ROLE_PRIMARY,
+                                       &nodeAlreadyExists);
 
     /*
      * After adding new node, if the node is not already exist, we replicate all existing
@@ -166,7 +168,8 @@ master_initialize_node_metadata(PG_FUNCTION_ARGS)
         WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
 
         AddNodeMetadata(workerNode->workerName, workerNode->workerPort, 0,
-                        workerNode->workerRack, false, &nodeAlreadyExists);
+                        workerNode->workerRack, false, NODE_ROLE_PRIMARY,
+                        &nodeAlreadyExists);
     }
 
     PG_RETURN_BOOL(true);
|
||||||
*/
|
*/
|
||||||
static Datum
|
static Datum
|
||||||
AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack,
|
AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack,
|
||||||
bool hasMetadata, bool *nodeAlreadyExists)
|
bool hasMetadata, char noderole, bool *nodeAlreadyExists)
|
||||||
{
|
{
|
||||||
Relation pgDistNode = NULL;
|
Relation pgDistNode = NULL;
|
||||||
int nextNodeIdInt = 0;
|
int nextNodeIdInt = 0;
|
||||||
|
@ -465,7 +468,8 @@ AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack,
|
||||||
/* generate the new node id from the sequence */
|
/* generate the new node id from the sequence */
|
||||||
nextNodeIdInt = GetNextNodeId();
|
nextNodeIdInt = GetNextNodeId();
|
||||||
|
|
||||||
InsertNodeRow(nextNodeIdInt, nodeName, nodePort, groupId, nodeRack, hasMetadata);
|
InsertNodeRow(nextNodeIdInt, nodeName, nodePort, groupId,
|
||||||
|
nodeRack, hasMetadata, noderole);
|
||||||
|
|
||||||
workerNode = FindWorkerNode(nodeName, nodePort);
|
workerNode = FindWorkerNode(nodeName, nodePort);
|
||||||
|
|
||||||
|
@@ -512,6 +516,7 @@ GenerateNodeTuple(WorkerNode *workerNode)
     values[Anum_pg_dist_node_nodeport - 1] = UInt32GetDatum(workerNode->workerPort);
     values[Anum_pg_dist_node_noderack - 1] = CStringGetTextDatum(workerNode->workerRack);
     values[Anum_pg_dist_node_hasmetadata - 1] = BoolGetDatum(workerNode->hasMetadata);
+    values[Anum_pg_dist_node_noderole - 1] = CharGetDatum(workerNode->nodeRole);
 
     /* open shard relation and insert new tuple */
     pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock);
@@ -650,7 +655,7 @@ EnsureCoordinator(void)
  */
 static void
 InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, uint32 groupId, char *nodeRack,
-              bool hasMetadata)
+              bool hasMetadata, char noderole)
 {
     Relation pgDistNode = NULL;
     TupleDesc tupleDescriptor = NULL;
@@ -668,6 +673,7 @@ InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, uint32 groupId, char *
     values[Anum_pg_dist_node_nodeport - 1] = UInt32GetDatum(nodePort);
     values[Anum_pg_dist_node_noderack - 1] = CStringGetTextDatum(nodeRack);
     values[Anum_pg_dist_node_hasmetadata - 1] = BoolGetDatum(hasMetadata);
+    values[Anum_pg_dist_node_noderole - 1] = CharGetDatum(noderole);
 
     /* open shard relation and insert new tuple */
     pgDistNode = heap_open(DistNodeRelationId(), AccessExclusiveLock);
@@ -867,6 +873,7 @@ ParseWorkerNodeFileAndRename()
         strlcpy(workerNode->workerRack, nodeRack, WORKER_LENGTH);
         workerNode->workerPort = nodePort;
         workerNode->hasMetadata = false;
+        workerNode->nodeRole = NODE_ROLE_PRIMARY;
 
         workerNodeList = lappend(workerNodeList, workerNode);
     }
@@ -906,6 +913,14 @@ TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple)
                                      tupleDescriptor, &isNull);
     Datum hasMetadata = heap_getattr(heapTuple, Anum_pg_dist_node_hasmetadata,
                                      tupleDescriptor, &isNull);
+    Datum nodeRole = heap_getattr(heapTuple, Anum_pg_dist_node_noderole,
+                                  tupleDescriptor, &isNull);
+
+    /* since the column doesn't exist yet assume it references a primary */
+    if (HeapTupleHeaderGetNatts(heapTuple->t_data) < Anum_pg_dist_node_noderole)
+    {
+        nodeRole = CharGetDatum(NODE_ROLE_PRIMARY);
+    }
 
     Assert(!HeapTupleHasNulls(heapTuple));
 
@@ -916,6 +931,7 @@ TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple)
     strlcpy(workerNode->workerName, TextDatumGetCString(nodeName), WORKER_LENGTH);
     strlcpy(workerNode->workerRack, TextDatumGetCString(nodeRack), WORKER_LENGTH);
     workerNode->hasMetadata = DatumGetBool(hasMetadata);
+    workerNode->nodeRole = DatumGetChar(nodeRole);
 
     return workerNode;
 }
@@ -23,6 +23,7 @@ typedef struct FormData_pg_dist_node
     text nodename;
     int nodeport;
     bool hasmetadata;
+    char noderole;    /* see codes below */
 #endif
 } FormData_pg_dist_node;
 
@@ -37,15 +38,19 @@ typedef FormData_pg_dist_node *Form_pg_dist_node;
  * compiler constants for pg_dist_node
  * ----------------
  */
-#define Natts_pg_dist_node 6
+#define Natts_pg_dist_node 7
 #define Anum_pg_dist_node_nodeid 1
 #define Anum_pg_dist_node_groupid 2
 #define Anum_pg_dist_node_nodename 3
 #define Anum_pg_dist_node_nodeport 4
 #define Anum_pg_dist_node_noderack 5
 #define Anum_pg_dist_node_hasmetadata 6
+#define Anum_pg_dist_node_noderole 7
 
 #define GROUPID_SEQUENCE_NAME "pg_dist_groupid_seq"
 #define NODEID_SEQUENCE_NAME "pg_dist_node_nodeid_seq"
 
+#define NODE_ROLE_PRIMARY 'p'
+#define NODE_ROLE_SECONDARY 's'
+
 #endif   /* PG_DIST_NODE_H */
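The two role codes above are what the selection logic keys on: only 'p' rows are returned by WorkerNodeList() and the candidate-node helpers, and only they count toward live groups. This commit adds no user-facing function for registering a secondary, so the statement below is a hypothetical illustration of the codes only, in the same spirit as the direct UPDATEs the regression tests already run against pg_dist_node:

    -- hypothetical: mark an existing node as a secondary ('s'); it then stops being
    -- considered for shard placement and metadata sync in the code paths changed here
    UPDATE pg_dist_node SET noderole = 's' WHERE nodename = 'localhost' AND nodeport = 57638;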
@@ -43,6 +43,7 @@ typedef struct WorkerNode
     uint32 groupId;                      /* node's groupId; same for the nodes that are in the same group */
     char workerRack[WORKER_LENGTH];      /* node's network location */
     bool hasMetadata;                    /* node gets metadata changes */
+    char nodeRole;                       /* whether the node is a primary or secondary */
 } WorkerNode;
 
 
@@ -57,8 +58,7 @@ extern WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList,
                                                      uint64 shardId,
                                                      uint32 placementIndex);
 extern WorkerNode * WorkerGetLocalFirstCandidateNode(List *currentNodeList);
-extern WorkerNode * WorkerGetNodeWithName(const char *hostname);
-extern uint32 WorkerGetLiveNodeCount(void);
+extern uint32 WorkerGetLiveGroupCount(void);
 extern List * WorkerNodeList(void);
 extern WorkerNode * FindWorkerNode(char *nodeName, int32 nodePort);
 extern List * ReadWorkerNodes(void);
@@ -10,14 +10,14 @@ DETAIL:  There are no active worker nodes.
 -- add the nodes to the cluster
 SELECT master_add_node('localhost', :worker_1_port);
-         master_add_node
----------------------------------
- (1,1,localhost,57637,default,f)
+          master_add_node
+-----------------------------------
+ (1,1,localhost,57637,default,f,p)
 (1 row)
 
 SELECT master_add_node('localhost', :worker_2_port);
-         master_add_node
----------------------------------
- (2,2,localhost,57638,default,f)
+          master_add_node
+-----------------------------------
+ (2,2,localhost,57638,default,f,p)
 (1 row)
 
 -- get the active nodes
@@ -30,9 +30,9 @@ SELECT master_get_active_worker_nodes();
 
 -- try to add a node that is already in the cluster
 SELECT * FROM master_add_node('localhost', :worker_1_port);
- nodeid | groupid | nodename  | nodeport | noderack | hasmetadata
---------+---------+-----------+----------+----------+-------------
-      1 |       1 | localhost |    57637 | default  | f
+ nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | noderole
+--------+---------+-----------+----------+----------+-------------+----------
+      1 |       1 | localhost |    57637 | default  | f           | p
 (1 row)
 
 -- get the active nodes
@@ -60,8 +60,8 @@ SELECT master_get_active_worker_nodes();
 -- try to disable a node with no placements see that node is removed
 SELECT master_add_node('localhost', :worker_2_port);
-         master_add_node
----------------------------------
- (3,3,localhost,57638,default,f)
+          master_add_node
+-----------------------------------
+ (3,3,localhost,57638,default,f,p)
 (1 row)
 
 SELECT master_disable_node('localhost', :worker_2_port);
@@ -79,8 +79,8 @@ SELECT master_get_active_worker_nodes();
 -- add some shard placements to the cluster
 SELECT master_add_node('localhost', :worker_2_port);
-         master_add_node
----------------------------------
- (4,4,localhost,57638,default,f)
+          master_add_node
+-----------------------------------
+ (4,4,localhost,57638,default,f,p)
 (1 row)
 
 CREATE TABLE cluster_management_test (col_1 text, col_2 int);
@@ -140,8 +140,8 @@ SELECT master_get_active_worker_nodes();
 -- restore the node for next tests
 SELECT master_add_node('localhost', :worker_2_port);
-         master_add_node
----------------------------------
- (5,5,localhost,57638,default,f)
+          master_add_node
+-----------------------------------
+ (5,5,localhost,57638,default,f,p)
 (1 row)
 
 -- try to remove a node with active placements and see that node removal is failed
@@ -178,8 +178,8 @@ SELECT master_get_active_worker_nodes();
 -- clean-up
 SELECT master_add_node('localhost', :worker_2_port);
-         master_add_node
----------------------------------
- (6,6,localhost,57638,default,f)
+          master_add_node
+-----------------------------------
+ (6,6,localhost,57638,default,f,p)
 (1 row)
 
 UPDATE pg_dist_shard_placement SET shardstate=1 WHERE nodeport=:worker_2_port;
@@ -194,8 +194,8 @@ SELECT master_remove_node('localhost', :worker_2_port);
 UPDATE pg_dist_node SET hasmetadata=true WHERE nodeport=:worker_1_port;
 SELECT master_add_node('localhost', :worker_2_port);
-         master_add_node
----------------------------------
- (7,7,localhost,57638,default,f)
+          master_add_node
+-----------------------------------
+ (7,7,localhost,57638,default,f,p)
 (1 row)
 
 \c - - - :worker_1_port
@@ -223,8 +223,8 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep
 UPDATE pg_dist_node SET hasmetadata=false WHERE nodeport=:worker_1_port;
 SELECT master_add_node('localhost', :worker_2_port);
-         master_add_node
----------------------------------
- (8,8,localhost,57638,default,f)
+          master_add_node
+-----------------------------------
+ (8,8,localhost,57638,default,f,p)
 (1 row)
 
 \c - - - :worker_1_port
@@ -244,8 +244,8 @@ SELECT
 (1 row)
 
 SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata
---------+---------+----------+----------+----------+-------------
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | noderole
+--------+---------+----------+----------+----------+-------------+----------
 (0 rows)
 
 -- check that adding two nodes in the same transaction works
@@ -253,15 +253,15 @@ SELECT
     master_add_node('localhost', :worker_1_port),
     master_add_node('localhost', :worker_2_port);
-         master_add_node         |          master_add_node
----------------------------------+-----------------------------------
- (9,9,localhost,57637,default,f) | (10,10,localhost,57638,default,f)
+          master_add_node           |           master_add_node
+-----------------------------------+-------------------------------------
+ (9,9,localhost,57637,default,f,p) | (10,10,localhost,57638,default,f,p)
 (1 row)
 
 SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename  | nodeport | noderack | hasmetadata
---------+---------+-----------+----------+----------+-------------
-      9 |       9 | localhost |    57637 | default  | f
-     10 |      10 | localhost |    57638 | default  | f
+ nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | noderole
+--------+---------+-----------+----------+----------+-------------+----------
+      9 |       9 | localhost |    57637 | default  | f           | p
+     10 |      10 | localhost |    57638 | default  | f           | p
 (2 rows)
 
 -- check that mixed add/remove node commands work fine inside transaction
@@ -274,8 +274,8 @@ SELECT master_remove_node('localhost', :worker_2_port);
 
 SELECT master_add_node('localhost', :worker_2_port);
-          master_add_node
------------------------------------
- (11,11,localhost,57638,default,f)
+           master_add_node
+-------------------------------------
+ (11,11,localhost,57638,default,f,p)
 (1 row)
 
 SELECT master_remove_node('localhost', :worker_2_port);
@@ -294,8 +294,8 @@ UPDATE pg_dist_node SET hasmetadata=true WHERE nodeport=:worker_1_port;
 BEGIN;
 SELECT master_add_node('localhost', :worker_2_port);
-          master_add_node
------------------------------------
- (12,12,localhost,57638,default,f)
+           master_add_node
+-------------------------------------
+ (12,12,localhost,57638,default,f,p)
 (1 row)
 
 SELECT master_remove_node('localhost', :worker_2_port);
@@ -306,8 +306,8 @@ SELECT master_remove_node('localhost', :worker_2_port);
 
 SELECT master_add_node('localhost', :worker_2_port);
-          master_add_node
------------------------------------
- (13,13,localhost,57638,default,f)
+           master_add_node
+-------------------------------------
+ (13,13,localhost,57638,default,f,p)
 (1 row)
 
 COMMIT;
@@ -334,14 +334,14 @@ SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
 
 SELECT master_add_node('localhost', :worker_1_port);
-          master_add_node
------------------------------------
- (14,14,localhost,57637,default,f)
+           master_add_node
+-------------------------------------
+ (14,14,localhost,57637,default,f,p)
 (1 row)
 
 SELECT master_add_node('localhost', :worker_2_port);
-          master_add_node
------------------------------------
- (15,15,localhost,57638,default,f)
+           master_add_node
+-------------------------------------
+ (15,15,localhost,57638,default,f,p)
 (1 row)
 
 -- check that a distributed table can be created after adding a node in a transaction
@@ -354,8 +354,8 @@ SELECT master_remove_node('localhost', :worker_2_port);
 BEGIN;
 SELECT master_add_node('localhost', :worker_2_port);
-          master_add_node
------------------------------------
- (16,16,localhost,57638,default,f)
+           master_add_node
+-------------------------------------
+ (16,16,localhost,57638,default,f,p)
 (1 row)
 
 CREATE TABLE temp(col1 text, col2 int);
@@ -22,14 +22,14 @@ CREATE EXTENSION citus;
 -- re-add the nodes to the cluster
 SELECT master_add_node('localhost', :worker_1_port);
-         master_add_node
----------------------------------
- (1,1,localhost,57637,default,f)
+          master_add_node
+-----------------------------------
+ (1,1,localhost,57637,default,f,p)
 (1 row)
 
 SELECT master_add_node('localhost', :worker_2_port);
-         master_add_node
----------------------------------
- (2,2,localhost,57638,default,f)
+          master_add_node
+-----------------------------------
+ (2,2,localhost,57638,default,f,p)
 (1 row)
 
 -- verify that a table can be created after the extension has been dropped and recreated
@@ -75,6 +75,7 @@ ALTER EXTENSION citus UPDATE TO '6.1-14';
 ALTER EXTENSION citus UPDATE TO '6.1-15';
 ALTER EXTENSION citus UPDATE TO '6.1-16';
 ALTER EXTENSION citus UPDATE TO '6.1-17';
+ALTER EXTENSION citus UPDATE TO '6.1-18';
 -- ensure no objects were created outside pg_catalog
 SELECT COUNT(*)
 FROM pg_depend AS pgd,
@@ -29,10 +29,10 @@ SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s';
 -- pg_dist_node entries and reference tables
 SELECT unnest(master_metadata_snapshot());
                                                                                             unnest
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  TRUNCATE pg_dist_node
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, 'p'),(1, 1, 'localhost', 57637, 'default', FALSE, 'p')
 (3 rows)
 
 -- Create a test table with constraints and SERIAL
@@ -58,7 +58,7 @@ SELECT unnest(master_metadata_snapshot());
 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  TRUNCATE pg_dist_node
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, 'p'),(1, 1, 'localhost', 57637, 'default', FALSE, 'p')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
 ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
 CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
@@ -80,7 +80,7 @@ SELECT unnest(master_metadata_snapshot());
 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  TRUNCATE pg_dist_node
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, 'p'),(1, 1, 'localhost', 57637, 'default', FALSE, 'p')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
 ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
 CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
@@ -104,7 +104,7 @@ SELECT unnest(master_metadata_snapshot());
 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  TRUNCATE pg_dist_node
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, 'p'),(1, 1, 'localhost', 57637, 'default', FALSE, 'p')
 CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
 ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
@@ -134,7 +134,7 @@ SELECT unnest(master_metadata_snapshot());
 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  TRUNCATE pg_dist_node
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, 'p'),(1, 1, 'localhost', 57637, 'default', FALSE, 'p')
 CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
 ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
@ -157,7 +157,7 @@ SELECT unnest(master_metadata_snapshot());
|
||||||
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||||
TRUNCATE pg_dist_node
|
TRUNCATE pg_dist_node
|
||||||
SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
|
SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
|
||||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
|
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, 'p'),(1, 1, 'localhost', 57637, 'default', FALSE, 'p')
|
||||||
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
|
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
|
||||||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
|
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
|
||||||
ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
|
ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
|
||||||
|
@ -203,10 +203,10 @@ SELECT * FROM pg_dist_local_group;
 (1 row)

 SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata
---------+---------+-----------+----------+----------+-------------
- 1 | 1 | localhost | 57637 | default | t
- 2 | 2 | localhost | 57638 | default | f
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | noderole
+--------+---------+-----------+----------+----------+-------------+----------
+ 1 | 1 | localhost | 57637 | default | t | p
+ 2 | 2 | localhost | 57638 | default | f | p
 (2 rows)

 SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
@ -333,10 +333,10 @@ SELECT * FROM pg_dist_local_group;
 (1 row)

 SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata
---------+---------+-----------+----------+----------+-------------
- 1 | 1 | localhost | 57637 | default | t
- 2 | 2 | localhost | 57638 | default | f
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | noderole
+--------+---------+-----------+----------+----------+-------------+----------
+ 1 | 1 | localhost | 57637 | default | t | p
+ 2 | 2 | localhost | 57638 | default | f | p
 (2 rows)

 SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
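The two expected-output hunks above pick up the new noderole column that citus--6.1-17--6.1-18.sql adds to pg_dist_node with a default of 'p'. As a rough, hand-written illustration (not part of this diff), the role of each worker can be inspected directly on the coordinator; the 's' value for secondary nodes is taken from the new placement-policy test further down:

-- hedged sketch: list each worker together with its role ('p' is the default)
SELECT nodeid, nodename, nodeport, hasmetadata, noderole
FROM pg_dist_node
ORDER BY nodeid;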
@ -1134,8 +1134,8 @@ SELECT create_distributed_table('mx_table', 'a');
 \c - postgres - :master_port
 SELECT master_add_node('localhost', :worker_2_port);
  master_add_node
----------------------------------
- (4,4,localhost,57638,default,f)
+-----------------------------------
+ (4,4,localhost,57638,default,f,p)
 (1 row)

 SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
@ -1318,8 +1318,8 @@ WHERE logicalrelid='mx_ref'::regclass;
 SELECT master_add_node('localhost', :worker_2_port);
 NOTICE: Replicating reference table "mx_ref" to all workers
  master_add_node
----------------------------------
- (5,5,localhost,57638,default,f)
+-----------------------------------
+ (5,5,localhost,57638,default,f,p)
 (1 row)

 SELECT shardid, nodename, nodeport
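These hunks only widen master_add_node's composite result, which now ends with noderole. A minimal sketch (not from this diff; the hostname and port are placeholders) of reading the new field by name instead of parsing the composite text:

-- hedged sketch: master_add_node declares OUT columns, so noderole can be
-- selected directly; 'localhost'/5432 are placeholder connection details
SELECT nodeid, nodename, nodeport, noderole
FROM master_add_node('localhost', 5432);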
@ -44,8 +44,8 @@ SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
|
||||||
-- re-add the node for next tests
|
-- re-add the node for next tests
|
||||||
SELECT master_add_node('localhost', :worker_2_port);
|
SELECT master_add_node('localhost', :worker_2_port);
|
||||||
master_add_node
|
master_add_node
|
||||||
---------------------------------------------
|
-----------------------------------------------
|
||||||
(1380000,1380000,localhost,57638,default,f)
|
(1380000,1380000,localhost,57638,default,f,p)
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
-- remove a node with reference table
|
-- remove a node with reference table
|
||||||
|
@ -166,8 +166,8 @@ ERROR: could not find valid entry for node "localhost:57638"
|
||||||
SELECT master_add_node('localhost', :worker_2_port);
|
SELECT master_add_node('localhost', :worker_2_port);
|
||||||
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
||||||
master_add_node
|
master_add_node
|
||||||
---------------------------------------------
|
-----------------------------------------------
|
||||||
(1380001,1380001,localhost,57638,default,f)
|
(1380001,1380001,localhost,57638,default,f,p)
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
-- remove node in a transaction and ROLLBACK
|
-- remove node in a transaction and ROLLBACK
|
||||||
|
@ -389,8 +389,8 @@ WHERE
|
||||||
SELECT master_add_node('localhost', :worker_2_port);
|
SELECT master_add_node('localhost', :worker_2_port);
|
||||||
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
||||||
master_add_node
|
master_add_node
|
||||||
---------------------------------------------
|
-----------------------------------------------
|
||||||
(1380002,1380002,localhost,57638,default,f)
|
(1380002,1380002,localhost,57638,default,f,p)
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
-- test inserting a value then removing a node in a transaction
|
-- test inserting a value then removing a node in a transaction
|
||||||
|
@ -518,8 +518,8 @@ SELECT * FROM remove_node_reference_table;
|
||||||
SELECT master_add_node('localhost', :worker_2_port);
|
SELECT master_add_node('localhost', :worker_2_port);
|
||||||
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
||||||
master_add_node
|
master_add_node
|
||||||
---------------------------------------------
|
-----------------------------------------------
|
||||||
(1380003,1380003,localhost,57638,default,f)
|
(1380003,1380003,localhost,57638,default,f,p)
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
-- test executing DDL command then removing a node in a transaction
|
-- test executing DDL command then removing a node in a transaction
|
||||||
|
@ -643,8 +643,8 @@ Table "public.remove_node_reference_table"
|
||||||
SELECT master_add_node('localhost', :worker_2_port);
|
SELECT master_add_node('localhost', :worker_2_port);
|
||||||
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
||||||
master_add_node
|
master_add_node
|
||||||
---------------------------------------------
|
-----------------------------------------------
|
||||||
(1380004,1380004,localhost,57638,default,f)
|
(1380004,1380004,localhost,57638,default,f,p)
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
-- test DROP table after removing a node in a transaction
|
-- test DROP table after removing a node in a transaction
|
||||||
|
@ -711,8 +711,8 @@ SELECT * FROM pg_dist_colocation WHERE colocationid = 1380000;
|
||||||
-- re-add the node for next tests
|
-- re-add the node for next tests
|
||||||
SELECT master_add_node('localhost', :worker_2_port);
|
SELECT master_add_node('localhost', :worker_2_port);
|
||||||
master_add_node
|
master_add_node
|
||||||
---------------------------------------------
|
-----------------------------------------------
|
||||||
(1380005,1380005,localhost,57638,default,f)
|
(1380005,1380005,localhost,57638,default,f,p)
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
-- re-create remove_node_reference_table
|
-- re-create remove_node_reference_table
|
||||||
|
@ -846,8 +846,8 @@ SELECT master_add_node('localhost', :worker_2_port);
|
||||||
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
||||||
NOTICE: Replicating reference table "table1" to all workers
|
NOTICE: Replicating reference table "table1" to all workers
|
||||||
master_add_node
|
master_add_node
|
||||||
---------------------------------------------
|
-----------------------------------------------
|
||||||
(1380006,1380006,localhost,57638,default,f)
|
(1380006,1380006,localhost,57638,default,f,p)
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
-- test with master_disable_node
|
-- test with master_disable_node
|
||||||
|
@ -963,8 +963,8 @@ SELECT master_add_node('localhost', :worker_2_port);
|
||||||
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
NOTICE: Replicating reference table "remove_node_reference_table" to all workers
|
||||||
NOTICE: Replicating reference table "table1" to all workers
|
NOTICE: Replicating reference table "table1" to all workers
|
||||||
master_add_node
|
master_add_node
|
||||||
---------------------------------------------
|
-----------------------------------------------
|
||||||
(1380007,1380007,localhost,57638,default,f)
|
(1380007,1380007,localhost,57638,default,f,p)
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
-- DROP tables to clean workspace
|
-- DROP tables to clean workspace
|
||||||
|
|
|
@ -26,8 +26,8 @@ SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

 SELECT master_add_node('localhost', :worker_2_port);
  master_add_node
----------------------------------------------
- (1370000,1370000,localhost,57638,default,f)
+-----------------------------------------------
+ (1370000,1370000,localhost,57638,default,f,p)
 (1 row)

 -- verify node is added
@ -125,8 +125,8 @@ WHERE colocationid IN
 SELECT master_add_node('localhost', :worker_2_port);
 NOTICE: Replicating reference table "replicate_reference_table_valid" to all workers
  master_add_node
----------------------------------------------
- (1370002,1370002,localhost,57638,default,f)
+-----------------------------------------------
+ (1370002,1370002,localhost,57638,default,f,p)
 (1 row)

 -- status after master_add_node
@ -178,8 +178,8 @@ WHERE colocationid IN

 SELECT master_add_node('localhost', :worker_2_port);
  master_add_node
----------------------------------------------
- (1370002,1370002,localhost,57638,default,f)
+-----------------------------------------------
+ (1370002,1370002,localhost,57638,default,f,p)
 (1 row)

 -- status after master_add_node
@ -246,8 +246,8 @@ BEGIN;
 SELECT master_add_node('localhost', :worker_2_port);
 NOTICE: Replicating reference table "replicate_reference_table_rollback" to all workers
  master_add_node
----------------------------------------------
- (1370003,1370003,localhost,57638,default,f)
+-----------------------------------------------
+ (1370003,1370003,localhost,57638,default,f,p)
 (1 row)

 ROLLBACK;
@ -308,8 +308,8 @@ BEGIN;
 SELECT master_add_node('localhost', :worker_2_port);
 NOTICE: Replicating reference table "replicate_reference_table_commit" to all workers
  master_add_node
----------------------------------------------
- (1370004,1370004,localhost,57638,default,f)
+-----------------------------------------------
+ (1370004,1370004,localhost,57638,default,f,p)
 (1 row)

 COMMIT;
@ -403,8 +403,8 @@ BEGIN;
 SELECT master_add_node('localhost', :worker_2_port);
 NOTICE: Replicating reference table "replicate_reference_table_reference_one" to all workers
  master_add_node
----------------------------------------------
- (1370005,1370005,localhost,57638,default,f)
+-----------------------------------------------
+ (1370005,1370005,localhost,57638,default,f,p)
 (1 row)

 SELECT upgrade_to_reference_table('replicate_reference_table_hash');
@ -552,8 +552,8 @@ BEGIN;
 SELECT master_add_node('localhost', :worker_2_port);
 NOTICE: Replicating reference table "replicate_reference_table_drop" to all workers
  master_add_node
----------------------------------------------
- (1370009,1370009,localhost,57638,default,f)
+-----------------------------------------------
+ (1370009,1370009,localhost,57638,default,f,p)
 (1 row)

 DROP TABLE replicate_reference_table_drop;
@ -614,8 +614,8 @@ WHERE colocationid IN
 SELECT master_add_node('localhost', :worker_2_port);
 NOTICE: Replicating reference table "table1" to all workers
  master_add_node
----------------------------------------------
- (1370010,1370010,localhost,57638,default,f)
+-----------------------------------------------
+ (1370010,1370010,localhost,57638,default,f,p)
 (1 row)

 -- status after master_add_node
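As the NOTICE lines in these hunks show, re-adding a worker replicates every existing reference table to it before the node row (now including noderole) is returned. A condensed sketch of that round trip, assuming the test cluster's second worker on port 57638:

-- hedged sketch: removing and re-adding a worker re-replicates reference tables
SELECT master_remove_node('localhost', 57638);
SELECT master_add_node('localhost', 57638);
-- NOTICE: Replicating reference table "table1" to all workers  (one NOTICE per reference table)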
@ -0,0 +1,116 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1420000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1420000;
+-- run master_create_empty_shard with differing placement policies
+CREATE TABLE append_table (a int);
+SELECT master_create_distributed_table('append_table', 'a', 'append');
+ master_create_distributed_table
+---------------------------------
+
+(1 row)
+
+SET citus.shard_placement_policy TO 'round-robin';
+SELECT master_create_empty_shard('append_table');
+ master_create_empty_shard
+---------------------------
+ 1420000
+(1 row)
+
+SELECT master_create_empty_shard('append_table');
+ master_create_empty_shard
+---------------------------
+ 1420001
+(1 row)
+
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'append_table'::regclass);
+ shardid | shardstate | shardlength | nodename | nodeport | placementid
+---------+------------+-------------+-----------+----------+-------------
+ 1420000 | 1 | 0 | localhost | 57638 | 311
+ 1420000 | 1 | 0 | localhost | 57637 | 312
+ 1420001 | 1 | 0 | localhost | 57637 | 313
+ 1420001 | 1 | 0 | localhost | 57638 | 314
+(4 rows)
+
+UPDATE pg_dist_node SET noderole = 's' WHERE nodeport = :worker_2_port;
+-- round robin only considers primary nodes
+SELECT master_create_empty_shard('append_table');
+ERROR: could only find 1 of 2 possible nodes
+SET citus.shard_replication_factor = 1;
+SET citus.shard_placement_policy TO 'random';
+-- make sure it works when there's only one primary
+SELECT master_create_empty_shard('append_table');
+ master_create_empty_shard
+---------------------------
+ 1420003
+(1 row)
+
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'append_table'::regclass);
+ shardid | shardstate | shardlength | nodename | nodeport | placementid
+---------+------------+-------------+-----------+----------+-------------
+ 1420000 | 1 | 0 | localhost | 57638 | 311
+ 1420000 | 1 | 0 | localhost | 57637 | 312
+ 1420001 | 1 | 0 | localhost | 57637 | 313
+ 1420001 | 1 | 0 | localhost | 57638 | 314
+ 1420003 | 1 | 0 | localhost | 57637 | 315
+(5 rows)
+
+SELECT setseed(0.5);
+ setseed
+---------
+
+(1 row)
+
+UPDATE pg_dist_node SET noderole = 'p' WHERE nodeport = :worker_2_port;
+SELECT master_create_empty_shard('append_table');
+ master_create_empty_shard
+---------------------------
+ 1420004
+(1 row)
+
+SELECT master_create_empty_shard('append_table');
+ master_create_empty_shard
+---------------------------
+ 1420005
+(1 row)
+
+SELECT master_create_empty_shard('append_table');
+ master_create_empty_shard
+---------------------------
+ 1420006
+(1 row)
+
+SELECT master_create_empty_shard('append_table');
+ master_create_empty_shard
+---------------------------
+ 1420007
+(1 row)
+
+SELECT master_create_empty_shard('append_table');
+ master_create_empty_shard
+---------------------------
+ 1420008
+(1 row)
+
+SELECT master_create_empty_shard('append_table');
+ master_create_empty_shard
+---------------------------
+ 1420009
+(1 row)
+
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'append_table'::regclass);
+ shardid | shardstate | shardlength | nodename | nodeport | placementid
+---------+------------+-------------+-----------+----------+-------------
+ 1420000 | 1 | 0 | localhost | 57638 | 311
+ 1420000 | 1 | 0 | localhost | 57637 | 312
+ 1420001 | 1 | 0 | localhost | 57637 | 313
+ 1420001 | 1 | 0 | localhost | 57638 | 314
+ 1420003 | 1 | 0 | localhost | 57637 | 315
+ 1420004 | 1 | 0 | localhost | 57638 | 316
+ 1420005 | 1 | 0 | localhost | 57638 | 317
+ 1420006 | 1 | 0 | localhost | 57638 | 318
+ 1420007 | 1 | 0 | localhost | 57638 | 319
+ 1420008 | 1 | 0 | localhost | 57637 | 320
+ 1420009 | 1 | 0 | localhost | 57637 | 321
+(11 rows)
+
+-- clean up
+DROP TABLE append_table;
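This new expected-output file documents how shard placement interacts with noderole: round-robin placement counts only primaries, so demoting one of two workers makes a replication-factor-2 placement fail, while random placement with replication factor 1 still succeeds. A condensed sketch of the failing step, assuming the same two-worker test cluster:

-- hedged sketch: with one worker demoted to 's', only one primary remains
SET citus.shard_placement_policy TO 'round-robin';
UPDATE pg_dist_node SET noderole = 's' WHERE nodeport = 57638;
SELECT master_create_empty_shard('append_table');
-- ERROR: could only find 1 of 2 possible nodes
UPDATE pg_dist_node SET noderole = 'p' WHERE nodeport = 57638;  -- restore the primary role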
@ -79,14 +79,14 @@ CREATE EXTENSION citus;
 -- re-add the nodes to the cluster
 SELECT master_add_node('localhost', :worker_1_port);
  master_add_node
----------------------------------
- (1,1,localhost,57637,default,f)
+-----------------------------------
+ (1,1,localhost,57637,default,f,p)
 (1 row)

 SELECT master_add_node('localhost', :worker_2_port);
  master_add_node
----------------------------------
- (2,2,localhost,57638,default,f)
+-----------------------------------
+ (2,2,localhost,57638,default,f,p)
 (1 row)

 -- create a table with a SERIAL column
@ -217,16 +217,16 @@ SELECT master_add_node('localhost', 5432);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
 SELECT * FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata
---------+---------+----------+----------+----------+-------------
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | noderole
+--------+---------+----------+----------+----------+-------------+----------
 (0 rows)

 -- master_remove_node
 \c - - - :master_port
 SELECT master_add_node('localhost', 5432);
  master_add_node
---------------------------------------------
- (1370000,1370000,localhost,5432,default,f)
+----------------------------------------------
+ (1370000,1370000,localhost,5432,default,f,p)
 (1 row)

 \c - - - :worker_1_port
@ -234,9 +234,9 @@ SELECT master_remove_node('localhost', 5432);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
 SELECT * FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata
----------+---------+-----------+----------+----------+-------------
- 1370000 | 1370000 | localhost | 5432 | default | f
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | noderole
+---------+---------+-----------+----------+----------+-------------+----------
+ 1370000 | 1370000 | localhost | 5432 | default | f | p
 (1 row)

 \c - - - :master_port
@ -31,6 +31,7 @@ test: multi_master_protocol
 test: multi_load_data

 test: multi_insert_select
+test: multi_stage_protocol

 # ----------
 # Miscellaneous tests to check our query planning behavior
@ -770,8 +770,8 @@ SELECT master_add_node('localhost', :worker_1_port);
 NOTICE: Replicating reference table "nation" to all workers
 NOTICE: Replicating reference table "supplier" to all workers
  master_add_node
----------------------------------
- (3,3,localhost,57637,default,f)
+-----------------------------------
+ (3,3,localhost,57637,default,f,p)
 (1 row)

 RESET citus.shard_replication_factor;
@ -75,6 +75,7 @@ ALTER EXTENSION citus UPDATE TO '6.1-14';
 ALTER EXTENSION citus UPDATE TO '6.1-15';
 ALTER EXTENSION citus UPDATE TO '6.1-16';
 ALTER EXTENSION citus UPDATE TO '6.1-17';
+ALTER EXTENSION citus UPDATE TO '6.1-18';

 -- ensure no objects were created outside pg_catalog
 SELECT COUNT(*)
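The upgrade test now walks one step further, to 6.1-18. A short sketch (not part of the test) of applying the same step on an existing installation and confirming the catalog change it ships:

-- hedged sketch: upgrade and check that pg_dist_node gained the noderole column
ALTER EXTENSION citus UPDATE TO '6.1-18';
SELECT attname, format_type(atttypid, atttypmod) AS type
FROM pg_attribute
WHERE attrelid = 'pg_dist_node'::regclass AND attname = 'noderole';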
@ -0,0 +1,41 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1420000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1420000;
+
+-- run master_create_empty_shard with differing placement policies
+
+CREATE TABLE append_table (a int);
+SELECT master_create_distributed_table('append_table', 'a', 'append');
+
+SET citus.shard_placement_policy TO 'round-robin';
+SELECT master_create_empty_shard('append_table');
+SELECT master_create_empty_shard('append_table');
+
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'append_table'::regclass);
+
+UPDATE pg_dist_node SET noderole = 's' WHERE nodeport = :worker_2_port;
+
+-- round robin only considers primary nodes
+SELECT master_create_empty_shard('append_table');
+
+SET citus.shard_replication_factor = 1;
+SET citus.shard_placement_policy TO 'random';
+
+-- make sure it works when there's only one primary
+SELECT master_create_empty_shard('append_table');
+
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'append_table'::regclass);
+
+SELECT setseed(0.5);
+UPDATE pg_dist_node SET noderole = 'p' WHERE nodeport = :worker_2_port;
+
+SELECT master_create_empty_shard('append_table');
+SELECT master_create_empty_shard('append_table');
+SELECT master_create_empty_shard('append_table');
+SELECT master_create_empty_shard('append_table');
+SELECT master_create_empty_shard('append_table');
+SELECT master_create_empty_shard('append_table');
+
+SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'append_table'::regclass);
+
+-- clean up
+DROP TABLE append_table;