mirror of https://github.com/citusdata/citus.git

Add pg_dist_node.nodecluster

parent 74ce4faab5
commit 5618e69386
@@ -11,7 +11,7 @@ EXTVERSIONS = 5.0 5.0-1 5.0-2 \
     6.0-1 6.0-2 6.0-3 6.0-4 6.0-5 6.0-6 6.0-7 6.0-8 6.0-9 6.0-10 6.0-11 6.0-12 6.0-13 6.0-14 6.0-15 6.0-16 6.0-17 6.0-18 \
     6.1-1 6.1-2 6.1-3 6.1-4 6.1-5 6.1-6 6.1-7 6.1-8 6.1-9 6.1-10 6.1-11 6.1-12 6.1-13 6.1-14 6.1-15 6.1-16 6.1-17 \
     6.2-1 6.2-2 6.2-3 6.2-4 \
-    7.0-1 7.0-2 7.0-3 7.0-4 7.0-5 7.0-6 7.0-7 7.0-8
+    7.0-1 7.0-2 7.0-3 7.0-4 7.0-5 7.0-6 7.0-7 7.0-8 7.0-9
 
 # All citus--*.sql files in the source directory
 DATA = $(patsubst $(citus_abs_srcdir)/%.sql,%.sql,$(wildcard $(citus_abs_srcdir)/$(EXTENSION)--*--*.sql))
@@ -155,6 +155,8 @@ $(EXTENSION)--7.0-7.sql: $(EXTENSION)--7.0-6.sql $(EXTENSION)--7.0-6--7.0-7.sql
 	cat $^ > $@
 $(EXTENSION)--7.0-8.sql: $(EXTENSION)--7.0-7.sql $(EXTENSION)--7.0-7--7.0-8.sql
 	cat $^ > $@
+$(EXTENSION)--7.0-9.sql: $(EXTENSION)--7.0-8.sql $(EXTENSION)--7.0-8--7.0-9.sql
+	cat $^ > $@
 
 NO_PGXS = 1
 
@@ -0,0 +1,70 @@
+/* citus-7.0-8--7.0-9 */
+
+SET search_path = 'pg_catalog';
+
+ALTER TABLE pg_dist_node ADD COLUMN nodecluster name NOT NULL DEFAULT 'default';
+
+DROP FUNCTION master_add_node(text, integer, integer, noderole);
+CREATE FUNCTION master_add_node(nodename text,
+                                nodeport integer,
+                                groupid integer default 0,
+                                noderole noderole default 'primary',
+                                nodecluster name default 'default',
+                                OUT nodeid integer,
+                                OUT groupid integer,
+                                OUT nodename text,
+                                OUT nodeport integer,
+                                OUT noderack text,
+                                OUT hasmetadata boolean,
+                                OUT isactive bool,
+                                OUT noderole noderole,
+                                OUT nodecluster name)
+  RETURNS record
+  LANGUAGE C STRICT
+  AS 'MODULE_PATHNAME', $$master_add_node$$;
+COMMENT ON FUNCTION master_add_node(nodename text, nodeport integer,
+                                    groupid integer, noderole noderole, nodecluster name)
+  IS 'add node to the cluster';
+
+DROP FUNCTION master_add_inactive_node(text, integer, integer, noderole);
+CREATE FUNCTION master_add_inactive_node(nodename text,
+                                         nodeport integer,
+                                         groupid integer default 0,
+                                         noderole noderole default 'primary',
+                                         nodecluster name default 'default',
+                                         OUT nodeid integer,
+                                         OUT groupid integer,
+                                         OUT nodename text,
+                                         OUT nodeport integer,
+                                         OUT noderack text,
+                                         OUT hasmetadata boolean,
+                                         OUT isactive bool,
+                                         OUT noderole noderole,
+                                         OUT nodecluster name)
+  RETURNS record
+  LANGUAGE C STRICT
+  AS 'MODULE_PATHNAME',$$master_add_inactive_node$$;
+COMMENT ON FUNCTION master_add_inactive_node(nodename text,nodeport integer,
+                                             groupid integer, noderole noderole,
+                                             nodecluster name)
+  IS 'prepare node by adding it to pg_dist_node';
+
+DROP FUNCTION master_activate_node(text, integer);
+CREATE FUNCTION master_activate_node(nodename text,
+                                     nodeport integer,
+                                     OUT nodeid integer,
+                                     OUT groupid integer,
+                                     OUT nodename text,
+                                     OUT nodeport integer,
+                                     OUT noderack text,
+                                     OUT hasmetadata boolean,
+                                     OUT isactive bool,
+                                     OUT noderole noderole,
+                                     OUT nodecluster name)
+  RETURNS record
+  LANGUAGE C STRICT
+  AS 'MODULE_PATHNAME',$$master_activate_node$$;
+COMMENT ON FUNCTION master_activate_node(nodename text, nodeport integer)
+  IS 'activate a node which is in the cluster';
+
+RESET search_path;
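
Editor's note, not part of the commit: after this migration script runs, master_add_node accepts an optional nodecluster argument that falls back to 'default'. A minimal usage sketch, with a hypothetical host name, port, and cluster name:

    -- hypothetical values; nodecluster defaults to 'default' when omitted
    SELECT master_add_node('worker-1.example.com', 5432,
                           groupid     => 1,
                           noderole    => 'secondary',
                           nodecluster => 'analytics');
    SELECT nodename, nodeport, noderole, nodecluster FROM pg_dist_node;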
@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '7.0-8'
+default_version = '7.0-9'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog
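
Editor's note: a rough sketch of how an existing installation picks up the version bump above (assuming Citus 7.0-8 is already installed; the regression tests later in this diff exercise the same upgrade path):

    ALTER EXTENSION citus UPDATE TO '7.0-9';
    SELECT default_version, installed_version
      FROM pg_available_extensions WHERE name = 'citus';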
@@ -426,7 +426,7 @@ NodeListInsertCommand(List *workerNodeList)
 	/* generate the query without any values yet */
 	appendStringInfo(nodeListInsertCommand,
 					 "INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, "
-					 "noderack, hasmetadata, isactive, noderole) VALUES ");
+					 "noderack, hasmetadata, isactive, noderole, nodecluster) VALUES ");
 
 	/* iterate over the worker nodes, add the values */
 	foreach(workerNodeCell, workerNodeList)
@@ -440,7 +440,7 @@ NodeListInsertCommand(List *workerNodeList)
 		char *nodeRoleString = DatumGetCString(nodeRoleStringDatum);
 
 		appendStringInfo(nodeListInsertCommand,
-						 "(%d, %d, %s, %d, %s, %s, %s, '%s'::noderole)",
+						 "(%d, %d, %s, %d, %s, %s, %s, '%s'::noderole, %s)",
 						 workerNode->nodeId,
 						 workerNode->groupId,
 						 quote_literal_cstr(workerNode->workerName),
@@ -448,7 +448,8 @@ NodeListInsertCommand(List *workerNodeList)
 						 quote_literal_cstr(workerNode->workerRack),
 						 hasMetadataString,
 						 isActiveString,
-						 nodeRoleString);
+						 nodeRoleString,
+						 quote_literal_cstr(workerNode->nodeCluster));
 
 		processedWorkerNodeCount++;
 		if (processedWorkerNodeCount != workerCount)
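
Editor's note: taken together, the three NodeListInsertCommand hunks above make metadata sync ship the cluster name with every node row. A generated statement looks roughly like the sketch below (values are illustrative; the same shape appears verbatim in the regression output further down):

    INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack,
                              hasmetadata, isactive, noderole, nodecluster)
    VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default');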
@@ -2349,6 +2349,7 @@ InitializeWorkerNodeCache(void)
 		workerNode->hasMetadata = currentNode->hasMetadata;
 		workerNode->isActive = currentNode->isActive;
 		workerNode->nodeRole = currentNode->nodeRole;
+		strlcpy(workerNode->nodeCluster, currentNode->nodeCluster, NAMEDATALEN);
 
 		if (handleFound)
 		{
@@ -55,7 +55,7 @@ static Datum ActivateNode(char *nodeName, int nodePort);
 static void RemoveNodeFromCluster(char *nodeName, int32 nodePort);
 static Datum AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId,
 							 char *nodeRack, bool hasMetadata, bool isActive,
-							 Oid nodeRole, bool *nodeAlreadyExists);
+							 Oid nodeRole, char *nodeCluster, bool *nodeAlreadyExists);
 static uint32 CountPrimariesWithMetadata();
 static void SetNodeState(char *nodeName, int32 nodePort, bool isActive);
 static HeapTuple GetNodeTuple(char *nodeName, int32 nodePort);
@@ -64,7 +64,8 @@ static int32 GetNextGroupId(void);
 static uint32 GetMaxGroupId(void);
 static int GetNextNodeId(void);
 static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport, uint32 groupId,
-						  char *nodeRack, bool hasMetadata, bool isActive, Oid nodeRole);
+						  char *nodeRack, bool hasMetadata, bool isActive, Oid nodeRole,
+						  char *nodeCluster);
 static void DeleteNodeRow(char *nodename, int32 nodeport);
 static List * ParseWorkerNodeFileAndRename(void);
 static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple);
@@ -91,6 +92,7 @@ master_add_node(PG_FUNCTION_ARGS)
 	char *nodeNameString = text_to_cstring(nodeName);
 	int32 groupId = PG_GETARG_INT32(2);
 	Oid nodeRole = InvalidOid;
+	char *nodeClusterString = NULL;
 	char *nodeRack = WORKER_DEFAULT_RACK;
 	bool hasMetadata = false;
 	bool isActive = false;
@@ -99,18 +101,26 @@ master_add_node(PG_FUNCTION_ARGS)
 
 	CheckCitusVersion(ERROR);
 
-	/* during tests this function is called before nodeRole has been created */
+	/*
+	 * During tests this function is called before nodeRole and nodeCluster have been
+	 * created.
+	 */
 	if (PG_NARGS() == 3)
 	{
 		nodeRole = InvalidOid;
+		nodeClusterString = "default";
 	}
 	else
 	{
+		Name nodeClusterName = PG_GETARG_NAME(4);
+		nodeClusterString = NameStr(*nodeClusterName);
+
 		nodeRole = PG_GETARG_OID(3);
 	}
 
 	nodeRecord = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack,
-								 hasMetadata, isActive, nodeRole, &nodeAlreadyExists);
+								 hasMetadata, isActive, nodeRole, nodeClusterString,
+								 &nodeAlreadyExists);
 
 	/*
 	 * After adding new node, if the node did not already exist, we will activate
@@ -139,6 +149,8 @@ master_add_inactive_node(PG_FUNCTION_ARGS)
 	char *nodeNameString = text_to_cstring(nodeName);
 	int32 groupId = PG_GETARG_INT32(2);
 	Oid nodeRole = PG_GETARG_OID(3);
+	Name nodeClusterName = PG_GETARG_NAME(4);
+	char *nodeClusterString = NameStr(*nodeClusterName);
 	char *nodeRack = WORKER_DEFAULT_RACK;
 	bool hasMetadata = false;
 	bool isActive = false;
@@ -148,7 +160,8 @@ master_add_inactive_node(PG_FUNCTION_ARGS)
 	CheckCitusVersion(ERROR);
 
 	nodeRecord = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack,
-								 hasMetadata, isActive, nodeRole, &nodeAlreadyExists);
+								 hasMetadata, isActive, nodeRole, nodeClusterString,
+								 &nodeAlreadyExists);
 
 	PG_RETURN_DATUM(nodeRecord);
 }
@@ -379,7 +392,10 @@ master_initialize_node_metadata(PG_FUNCTION_ARGS)
 	ListCell *workerNodeCell = NULL;
 	List *workerNodes = NULL;
 	bool nodeAlreadyExists = false;
-	Oid nodeRole = InvalidOid; /* nodeRole doesn't exist when this function is called */
+
+	/* nodeRole and nodeCluster don't exist when this function is called */
+	Oid nodeRole = InvalidOid;
+	char *nodeCluster = "default";
 
 	CheckCitusVersion(ERROR);
 
@@ -390,7 +406,7 @@ master_initialize_node_metadata(PG_FUNCTION_ARGS)
 
 		AddNodeMetadata(workerNode->workerName, workerNode->workerPort, 0,
 						workerNode->workerRack, false, workerNode->isActive,
-						nodeRole, &nodeAlreadyExists);
+						nodeRole, nodeCluster, &nodeAlreadyExists);
 	}
 
 	PG_RETURN_BOOL(true);
@@ -664,7 +680,8 @@ CountPrimariesWithMetadata()
  */
 static Datum
 AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack,
-				bool hasMetadata, bool isActive, Oid nodeRole, bool *nodeAlreadyExists)
+				bool hasMetadata, bool isActive, Oid nodeRole, char *nodeCluster,
+				bool *nodeAlreadyExists)
 {
 	int nextNodeIdInt = 0;
 	Datum returnData = 0;
@@ -726,7 +743,7 @@ AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack,
 	nextNodeIdInt = GetNextNodeId();
 
 	InsertNodeRow(nextNodeIdInt, nodeName, nodePort, groupId, nodeRack, hasMetadata,
-				  isActive, nodeRole);
+				  isActive, nodeRole, nodeCluster);
 
 	workerNode = FindWorkerNode(nodeName, nodePort);
 
@@ -841,6 +858,9 @@ GenerateNodeTuple(WorkerNode *workerNode)
 	Datum values[Natts_pg_dist_node];
 	bool isNulls[Natts_pg_dist_node];
 
+	Datum nodeClusterStringDatum = CStringGetDatum(workerNode->nodeCluster);
+	Datum nodeClusterNameDatum = DirectFunctionCall1(namein, nodeClusterStringDatum);
+
 	/* form new shard tuple */
 	memset(values, 0, sizeof(values));
 	memset(isNulls, false, sizeof(isNulls));
@@ -853,6 +873,7 @@ GenerateNodeTuple(WorkerNode *workerNode)
 	values[Anum_pg_dist_node_hasmetadata - 1] = BoolGetDatum(workerNode->hasMetadata);
 	values[Anum_pg_dist_node_isactive - 1] = BoolGetDatum(workerNode->isActive);
 	values[Anum_pg_dist_node_noderole - 1] = ObjectIdGetDatum(workerNode->nodeRole);
+	values[Anum_pg_dist_node_nodecluster - 1] = nodeClusterNameDatum;
 
 	/* open shard relation and generate new tuple */
 	pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock);
@@ -994,7 +1015,7 @@ EnsureCoordinator(void)
  */
 static void
 InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, uint32 groupId, char *nodeRack,
-			  bool hasMetadata, bool isActive, Oid nodeRole)
+			  bool hasMetadata, bool isActive, Oid nodeRole, char *nodeCluster)
 {
 	Relation pgDistNode = NULL;
 	TupleDesc tupleDescriptor = NULL;
@@ -1002,6 +1023,9 @@ InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, uint32 groupId, char *nodeRack,
 	Datum values[Natts_pg_dist_node];
 	bool isNulls[Natts_pg_dist_node];
 
+	Datum nodeClusterStringDatum = CStringGetDatum(nodeCluster);
+	Datum nodeClusterNameDatum = DirectFunctionCall1(namein, nodeClusterStringDatum);
+
 	/* form new shard tuple */
 	memset(values, 0, sizeof(values));
 	memset(isNulls, false, sizeof(isNulls));
@@ -1014,6 +1038,7 @@ InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, uint32 groupId, char *nodeRack,
 	values[Anum_pg_dist_node_hasmetadata - 1] = BoolGetDatum(hasMetadata);
 	values[Anum_pg_dist_node_isactive - 1] = BoolGetDatum(isActive);
 	values[Anum_pg_dist_node_noderole - 1] = ObjectIdGetDatum(nodeRole);
+	values[Anum_pg_dist_node_nodecluster - 1] = nodeClusterNameDatum;
 
 	/* open shard relation and insert new tuple */
 	pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
@@ -1256,6 +1281,8 @@ TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple)
 								  tupleDescriptor, &isNull);
 	Datum nodeRole = heap_getattr(heapTuple, Anum_pg_dist_node_noderole,
 								  tupleDescriptor, &isNull);
+	Datum nodeCluster = heap_getattr(heapTuple, Anum_pg_dist_node_nodecluster,
+									 tupleDescriptor, &isNull);
 
 	Assert(!HeapTupleHasNulls(heapTuple));
 
@@ -1269,6 +1296,12 @@ TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple)
 	workerNode->isActive = DatumGetBool(isActive);
 	workerNode->nodeRole = DatumGetObjectId(nodeRole);
 
+	{
+		Name nodeClusterName = DatumGetName(nodeCluster);
+		char *nodeClusterString = NameStr(*nodeClusterName);
+		strlcpy(workerNode->nodeCluster, nodeClusterString, NAMEDATALEN);
+	}
+
 	return workerNode;
 }
 
@@ -11,30 +11,6 @@
 #ifndef PG_DIST_NODE_H
 #define PG_DIST_NODE_H
 
-/* ----------------
- * pg_dist_node definition.
- * ----------------
- */
-typedef struct FormData_pg_dist_node
-{
-	int nodeid;
-	int groupid;
-#ifdef CATALOG_VARLEN
-	text nodename;
-	int nodeport;
-	bool hasmetadata;
-	bool isactive
-	Oid noderole;
-#endif
-} FormData_pg_dist_node;
-
-/* ----------------
- * Form_pg_dist_partitions corresponds to a pointer to a tuple with
- * the format of pg_dist_partitions relation.
- * ----------------
- */
-typedef FormData_pg_dist_node *Form_pg_dist_node;
-
 /* ----------------
  * compiler constants for pg_dist_node
  * ----------------
@@ -44,7 +20,7 @@ typedef FormData_pg_dist_node *Form_pg_dist_node;
  * in particular their OUT parameters) must be changed whenever the definition of
  * pg_dist_node changes.
  */
-#define Natts_pg_dist_node 8
+#define Natts_pg_dist_node 9
 #define Anum_pg_dist_node_nodeid 1
 #define Anum_pg_dist_node_groupid 2
 #define Anum_pg_dist_node_nodename 3
@@ -53,6 +29,7 @@ typedef FormData_pg_dist_node *Form_pg_dist_node;
 #define Anum_pg_dist_node_hasmetadata 6
 #define Anum_pg_dist_node_isactive 7
 #define Anum_pg_dist_node_noderole 8
+#define Anum_pg_dist_node_nodecluster 9
 
 #define GROUPID_SEQUENCE_NAME "pg_dist_groupid_seq"
 #define NODEID_SEQUENCE_NAME "pg_dist_node_nodeid_seq"
@@ -17,7 +17,7 @@
 #include "nodes/pg_list.h"
 
 
-/* Worker node name's maximum length */
+/* Worker nodeName's, nodePort's, and nodeCluster's maximum length */
 #define WORKER_LENGTH 256
 
 /* Maximum length of worker port number (represented as string) */
@@ -44,6 +44,7 @@ typedef struct WorkerNode
 	bool hasMetadata;    /* node gets metadata changes */
 	bool isActive;       /* node's state */
 	Oid nodeRole;        /* the node's role in its group */
+	char nodeCluster[NAMEDATALEN]; /* the cluster the node is a part of */
 } WorkerNode;
 
 
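
Editor's note: the cached WorkerNode entry mirrors the new name-typed catalog column in a fixed NAMEDATALEN buffer. A quick way to confirm the column exists on a running installation is a catalog lookup like this sketch (not part of the commit):

    SELECT attname, format_type(atttypid, atttypmod) AS type
    FROM pg_attribute
    WHERE attrelid = 'pg_catalog.pg_dist_node'::regclass
      AND attname = 'nodecluster';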
@@ -11,12 +11,8 @@ ALTER EXTENSION citus UPDATE TO '7.0-3';
 ERROR:  There is no node at "localhost:57637"
 CONTEXT:  PL/pgSQL function citus.find_groupid_for_node(text,integer) line 6 at RAISE
 -- if you add a matching worker the upgrade should succeed
-SELECT master_add_node('localhost', :worker_1_port);
-          master_add_node          
------------------------------------
- (1,1,localhost,57637,default,f,t)
-(1 row)
-
+INSERT INTO pg_dist_node (nodename, nodeport, groupid)
+  VALUES ('localhost', :worker_1_port, 1);
 ALTER EXTENSION citus UPDATE TO '7.0-3';
 SELECT * FROM pg_dist_placement;
  placementid | shardid | shardstate | shardlength | groupid
@@ -282,15 +282,15 @@ SELECT
 	master_add_node('localhost', :worker_1_port),
 	master_add_node('localhost', :worker_2_port);
               master_add_node               |               master_add_node
--------------------------------------------+-------------------------------------------
- (8,7,localhost,57637,default,f,t,primary) | (9,8,localhost,57638,default,f,t,primary)
+---------------------------------------------------+---------------------------------------------------
+ (8,7,localhost,57637,default,f,t,primary,default) | (9,8,localhost,57638,default,f,t,primary,default)
 (1 row)
 
 SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole 
---------+---------+-----------+----------+----------+-------------+----------+----------
- 8      | 7       | localhost | 57637    | default  | f           | t        | primary
- 9      | 8       | localhost | 57638    | default  | f           | t        | primary
+ nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster 
+--------+---------+-----------+----------+----------+-------------+----------+----------+-------------
+ 8      | 7       | localhost | 57637    | default  | f           | t        | primary  | default
+ 9      | 8       | localhost | 57638    | default  | f           | t        | primary  | default
 (2 rows)
 
 -- check that mixed add/remove node commands work fine inside transaction
@@ -118,6 +118,7 @@ ALTER EXTENSION citus UPDATE TO '7.0-5';
 ALTER EXTENSION citus UPDATE TO '7.0-6';
 ALTER EXTENSION citus UPDATE TO '7.0-7';
 ALTER EXTENSION citus UPDATE TO '7.0-8';
+ALTER EXTENSION citus UPDATE TO '7.0-9';
 -- show running version
 SHOW citus.version;
  citus.version 
@@ -28,10 +28,10 @@ SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s';
 -- pg_dist_node entries and reference tables
 SELECT unnest(master_metadata_snapshot());
                                                                        unnest
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
  TRUNCATE pg_dist_node
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
 (3 rows)
 
 -- Create a test table with constraints and SERIAL
@@ -57,7 +57,7 @@ SELECT unnest(master_metadata_snapshot());
 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
  TRUNCATE pg_dist_node
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
  SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
  ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
  CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
@@ -78,7 +78,7 @@ SELECT unnest(master_metadata_snapshot());
 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
  TRUNCATE pg_dist_node
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
  SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
  ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
  CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
@@ -101,7 +101,7 @@ SELECT unnest(master_metadata_snapshot());
 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
  TRUNCATE pg_dist_node
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
  CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
  SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
  ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
@@ -130,7 +130,7 @@ SELECT unnest(master_metadata_snapshot());
 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
  TRUNCATE pg_dist_node
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
  CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
  SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
  ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
@@ -152,7 +152,7 @@ SELECT unnest(master_metadata_snapshot());
 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
  TRUNCATE pg_dist_node
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
  CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
  SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
  ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
@@ -179,8 +179,8 @@ SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
 SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset
 SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary');
                   master_add_node
---------------------------------------------
- (4,1,localhost,8888,default,f,t,secondary)
+----------------------------------------------------
+ (4,1,localhost,8888,default,f,t,secondary,default)
 (1 row)
 
 SELECT start_metadata_sync_to_node('localhost', 8888);
@@ -229,10 +229,10 @@ SELECT * FROM pg_dist_local_group;
 (1 row)
 
 SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole 
---------+---------+-----------+----------+----------+-------------+----------+----------
- 1      | 1       | localhost | 57637    | default  | t           | t        | primary
- 2      | 2       | localhost | 57638    | default  | f           | t        | primary
+ nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster 
+--------+---------+-----------+----------+----------+-------------+----------+----------+-------------
+ 1      | 1       | localhost | 57637    | default  | t           | t        | primary  | default
+ 2      | 2       | localhost | 57638    | default  | f           | t        | primary  | default
 (2 rows)
 
 SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
@@ -366,10 +366,10 @@ SELECT * FROM pg_dist_local_group;
 (1 row)
 
 SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole 
---------+---------+-----------+----------+----------+-------------+----------+----------
- 1      | 1       | localhost | 57637    | default  | t           | t        | primary
- 2      | 2       | localhost | 57638    | default  | f           | t        | primary
+ nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster 
+--------+---------+-----------+----------+----------+-------------+----------+----------+-------------
+ 1      | 1       | localhost | 57637    | default  | t           | t        | primary  | default
+ 2      | 2       | localhost | 57638    | default  | f           | t        | primary  | default
 (2 rows)
 
 SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
@@ -1162,8 +1162,8 @@ SELECT create_distributed_table('mx_table', 'a');
 \c - postgres - :master_port
 SELECT master_add_node('localhost', :worker_2_port);
               master_add_node
--------------------------------------------
- (5,4,localhost,57638,default,f,t,primary)
+---------------------------------------------------
+ (5,4,localhost,57638,default,f,t,primary,default)
 (1 row)
 
 SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
@@ -1375,8 +1375,8 @@ WHERE logicalrelid='mx_ref'::regclass;
 SELECT master_add_node('localhost', :worker_2_port);
 NOTICE:  Replicating reference table "mx_ref" to the node localhost:57638
               master_add_node
--------------------------------------------
- (6,5,localhost,57638,default,f,t,primary)
+---------------------------------------------------
+ (6,5,localhost,57638,default,f,t,primary,default)
 (1 row)
 
 SELECT shardid, nodename, nodeport
@@ -15,7 +15,8 @@ INSERT INTO pg_dist_shard_placement
 ALTER EXTENSION citus UPDATE TO '7.0-3';
 
 -- if you add a matching worker the upgrade should succeed
-SELECT master_add_node('localhost', :worker_1_port);
+INSERT INTO pg_dist_node (nodename, nodeport, groupid)
+  VALUES ('localhost', :worker_1_port, 1);
 ALTER EXTENSION citus UPDATE TO '7.0-3';
 
 SELECT * FROM pg_dist_placement;
@@ -118,6 +118,7 @@ ALTER EXTENSION citus UPDATE TO '7.0-5';
 ALTER EXTENSION citus UPDATE TO '7.0-6';
 ALTER EXTENSION citus UPDATE TO '7.0-7';
 ALTER EXTENSION citus UPDATE TO '7.0-8';
+ALTER EXTENSION citus UPDATE TO '7.0-9';
 
 -- show running version
 SHOW citus.version;