mirror of https://github.com/citusdata/citus.git
Return nodeid instead of record in some UDFs
parent d23185d077
commit 48ff4691a0
@@ -304,4 +304,62 @@ CREATE OR REPLACE FUNCTION rebalance_table_shards(relation regclass,
COMMENT ON FUNCTION rebalance_table_shards(regclass, float4, int, bigint[], citus.shard_transfer_mode)
    IS 'rebalance the shards of the given table across the worker nodes (including colocated shards of other tables)';

+DROP FUNCTION master_add_node(text, integer, integer, noderole, name);
+CREATE FUNCTION master_add_node(nodename text,
+                                nodeport integer,
+                                groupid integer default 0,
+                                noderole noderole default 'primary',
+                                nodecluster name default 'default')
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$master_add_node$$;
+COMMENT ON FUNCTION master_add_node(nodename text, nodeport integer,
+                                    groupid integer, noderole noderole, nodecluster name)
+    IS 'add node to the cluster';
+
+DROP FUNCTION master_add_inactive_node(text, integer, integer, noderole, name);
+CREATE FUNCTION master_add_inactive_node(nodename text,
+                                         nodeport integer,
+                                         groupid integer default 0,
+                                         noderole noderole default 'primary',
+                                         nodecluster name default 'default')
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME',$$master_add_inactive_node$$;
+COMMENT ON FUNCTION master_add_inactive_node(nodename text,nodeport integer,
+                                             groupid integer, noderole noderole,
+                                             nodecluster name)
+    IS 'prepare node by adding it to pg_dist_node';
+
+SET search_path = 'pg_catalog';
+
+DROP FUNCTION master_activate_node(text, integer);
+CREATE FUNCTION master_activate_node(nodename text,
+                                     nodeport integer)
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME',$$master_activate_node$$;
+COMMENT ON FUNCTION master_activate_node(nodename text, nodeport integer)
+    IS 'activate a node which is in the cluster';
+
+DROP FUNCTION master_add_secondary_node(text, integer, text, integer, name);
+CREATE FUNCTION master_add_secondary_node(nodename text,
+                                          nodeport integer,
+                                          primaryname text,
+                                          primaryport integer,
+                                          nodecluster name default 'default')
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$master_add_secondary_node$$;
+COMMENT ON FUNCTION master_add_secondary_node(nodename text, nodeport integer,
+                                              primaryname text, primaryport integer,
+                                              nodecluster name)
+    IS 'add a secondary node to the cluster';
+
+
+REVOKE ALL ON FUNCTION master_activate_node(text,int) FROM PUBLIC;
+REVOKE ALL ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) FROM PUBLIC;
+REVOKE ALL ON FUNCTION master_add_node(text,int,int,noderole,name) FROM PUBLIC;
+REVOKE ALL ON FUNCTION master_add_secondary_node(text,int,text,int,name) FROM PUBLIC;
+
RESET search_path;
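The net effect of the migration above: all four node UDFs now return the new node's integer nodeid instead of a pg_dist_node record. A minimal sketch of the new behavior, assuming a single worker reachable at localhost:9700 (hypothetical host and port):

    SELECT master_add_node('localhost', 9700);
     master_add_node
    -----------------
     1
    (1 row)
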
@@ -54,14 +54,13 @@ int GroupSize = 1;
char *CurrentCluster = "default";

/* local function forward declarations */
-static Datum ActivateNode(char *nodeName, int nodePort);
+static int ActivateNode(char *nodeName, int nodePort);
static void RemoveNodeFromCluster(char *nodeName, int32 nodePort);
-static Datum AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId,
-                             char *nodeRack, bool hasMetadata, bool isActive,
-                             Oid nodeRole, char *nodeCluster, bool *nodeAlreadyExists);
+static int AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId,
+                           char *nodeRack, bool hasMetadata, bool isActive,
+                           Oid nodeRole, char *nodeCluster, bool *nodeAlreadyExists);
static void SetNodeState(char *nodeName, int32 nodePort, bool isActive);
static HeapTuple GetNodeTuple(char *nodeName, int32 nodePort);
-static Datum GenerateNodeTuple(WorkerNode *workerNode);
static int32 GetNextGroupId(void);
static int GetNextNodeId(void);
static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport, int32 groupId,
@@ -85,7 +84,7 @@ PG_FUNCTION_INFO_V1(get_shard_id_for_distribution_column);


/*
- * master_add_node function adds a new node to the cluster and returns its data. It also
+ * master_add_node function adds a new node to the cluster and returns its id. It also
 * replicates all reference tables to the new node.
 */
Datum
@@ -101,7 +100,7 @@ master_add_node(PG_FUNCTION_ARGS)
    bool hasMetadata = false;
    bool isActive = false;
    bool nodeAlreadyExists = false;
-   Datum nodeRecord;
+   int nodeId = 0;

    CheckCitusVersion(ERROR);

@@ -122,9 +121,9 @@ master_add_node(PG_FUNCTION_ARGS)
        nodeRole = PG_GETARG_OID(3);
    }

-   nodeRecord = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack,
-                                hasMetadata, isActive, nodeRole, nodeClusterString,
-                                &nodeAlreadyExists);
+   nodeId = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack,
+                            hasMetadata, isActive, nodeRole, nodeClusterString,
+                            &nodeAlreadyExists);

    /*
     * After adding new node, if the node did not already exist, we will activate
@@ -133,16 +132,16 @@ master_add_node(PG_FUNCTION_ARGS)
     */
    if (!nodeAlreadyExists)
    {
-       nodeRecord = ActivateNode(nodeNameString, nodePort);
+       ActivateNode(nodeNameString, nodePort);
    }

-   PG_RETURN_DATUM(nodeRecord);
+   PG_RETURN_INT32(nodeId);
}


/*
 * master_add_inactive_node function adds a new node to the cluster as inactive node
- * and returns information about newly added node. It does not replicate reference
+ * and returns id of the newly added node. It does not replicate reference
 * tables to the new node, it only adds new node to the pg_dist_node table.
 */
Datum
@@ -159,15 +158,15 @@ master_add_inactive_node(PG_FUNCTION_ARGS)
    bool hasMetadata = false;
    bool isActive = false;
    bool nodeAlreadyExists = false;
-   Datum nodeRecord;
+   int nodeId = 0;

    CheckCitusVersion(ERROR);

-   nodeRecord = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack,
-                                hasMetadata, isActive, nodeRole, nodeClusterString,
-                                &nodeAlreadyExists);
+   nodeId = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack,
+                            hasMetadata, isActive, nodeRole, nodeClusterString,
+                            &nodeAlreadyExists);

-   PG_RETURN_DATUM(nodeRecord);
+   PG_RETURN_INT32(nodeId);
}


@@ -194,15 +193,15 @@ master_add_secondary_node(PG_FUNCTION_ARGS)
    bool hasMetadata = false;
    bool isActive = true;
    bool nodeAlreadyExists = false;
-   Datum nodeRecord;
+   int nodeId = 0;

    CheckCitusVersion(ERROR);

-   nodeRecord = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack,
-                                hasMetadata, isActive, nodeRole, nodeClusterString,
-                                &nodeAlreadyExists);
+   nodeId = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack,
+                            hasMetadata, isActive, nodeRole, nodeClusterString,
+                            &nodeAlreadyExists);

-   PG_RETURN_DATUM(nodeRecord);
+   PG_RETURN_INT32(nodeId);
}


@@ -305,15 +304,15 @@ master_activate_node(PG_FUNCTION_ARGS)
    int32 nodePort = PG_GETARG_INT32(1);

    char *nodeNameString = text_to_cstring(nodeName);
-   Datum nodeRecord = 0;
+   int nodeId = 0;

    CheckCitusVersion(ERROR);

    EnsureCoordinator();

-   nodeRecord = ActivateNode(nodeNameString, nodePort);
+   nodeId = ActivateNode(nodeNameString, nodePort);

-   PG_RETURN_DATUM(nodeRecord);
+   PG_RETURN_INT32(nodeId);
}


@@ -439,12 +438,11 @@ PrimaryNodeForGroup(int32 groupId, bool *groupContainsNodes)
 * includes only replicating the reference tables and setting isactive column of the
 * given node.
 */
-static Datum
+static int
ActivateNode(char *nodeName, int nodePort)
{
    WorkerNode *workerNode = NULL;
    bool isActive = true;
-   Datum nodeRecord = 0;

    /* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */
    LockRelationOid(DistNodeRelationId(), ExclusiveLock);
@@ -459,9 +457,7 @@ ActivateNode(char *nodeName, int nodePort)
        ReplicateAllReferenceTablesToNode(nodeName, nodePort);
    }

-   nodeRecord = GenerateNodeTuple(workerNode);
-
-   return nodeRecord;
+   return workerNode->nodeId;
}


@@ -481,7 +477,7 @@ master_update_node(PG_FUNCTION_ARGS)
    /*
     * force is used when an update needs to happen regardless of conflicting locks. This
     * feature is important to force the update during a failover due to failure, eg. by
-    * a highavailability system such as pg_auto_failover. The strategy is a to start a
+    * a high-availability system such as pg_auto_failover. The strategy is a to start a
     * background worker that actively cancels backends holding conflicting locks with
     * this backend.
     *
@@ -956,19 +952,18 @@ CountPrimariesWithMetadata(void)
/*
 * AddNodeMetadata checks the given node information and adds the specified node to the
 * pg_dist_node table of the master and workers with metadata.
- * If the node already exists, the function returns the information about the node.
+ * If the node already exists, the function returns the id of the node.
 * If not, the following prodecure is followed while adding a node: If the groupId is not
 * explicitly given by the user, the function picks the group that the new node should
 * be in with respect to GroupSize. Then, the new node is inserted into the local
 * pg_dist_node as well as the nodes with hasmetadata=true.
 */
-static Datum
+static int
AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack,
                bool hasMetadata, bool isActive, Oid nodeRole, char *nodeCluster,
                bool *nodeAlreadyExists)
{
    int nextNodeIdInt = 0;
-   Datum returnData = 0;
    WorkerNode *workerNode = NULL;
    char *nodeDeleteCommand = NULL;
    uint32 primariesWithMetadata = 0;
@@ -988,10 +983,9 @@ AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack,
    if (workerNode != NULL)
    {
        /* fill return data and return */
-       returnData = GenerateNodeTuple(workerNode);
        *nodeAlreadyExists = true;

-       return returnData;
+       return workerNode->nodeId;
    }

    /* user lets Citus to decide on the group that the newly added node should be in */
@@ -1040,8 +1034,7 @@ AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack,
        SendCommandToWorkers(WORKERS_WITH_METADATA, nodeInsertCommand);
    }

-   returnData = GenerateNodeTuple(workerNode);
-   return returnData;
+   return nextNodeIdInt;
}


@@ -1129,50 +1122,6 @@ GetNodeTuple(char *nodeName, int32 nodePort)
}


-/*
- * GenerateNodeTuple gets a worker node and return a heap tuple of
- * given worker node.
- */
-static Datum
-GenerateNodeTuple(WorkerNode *workerNode)
-{
-   Relation pgDistNode = NULL;
-   TupleDesc tupleDescriptor = NULL;
-   HeapTuple heapTuple = NULL;
-   Datum nodeDatum = 0;
-   Datum values[Natts_pg_dist_node];
-   bool isNulls[Natts_pg_dist_node];
-
-   Datum nodeClusterStringDatum = CStringGetDatum(workerNode->nodeCluster);
-   Datum nodeClusterNameDatum = DirectFunctionCall1(namein, nodeClusterStringDatum);
-
-   /* form new shard tuple */
-   memset(values, 0, sizeof(values));
-   memset(isNulls, false, sizeof(isNulls));
-
-   values[Anum_pg_dist_node_nodeid - 1] = UInt32GetDatum(workerNode->nodeId);
-   values[Anum_pg_dist_node_groupid - 1] = Int32GetDatum(workerNode->groupId);
-   values[Anum_pg_dist_node_nodename - 1] = CStringGetTextDatum(workerNode->workerName);
-   values[Anum_pg_dist_node_nodeport - 1] = UInt32GetDatum(workerNode->workerPort);
-   values[Anum_pg_dist_node_noderack - 1] = CStringGetTextDatum(workerNode->workerRack);
-   values[Anum_pg_dist_node_hasmetadata - 1] = BoolGetDatum(workerNode->hasMetadata);
-   values[Anum_pg_dist_node_isactive - 1] = BoolGetDatum(workerNode->isActive);
-   values[Anum_pg_dist_node_noderole - 1] = ObjectIdGetDatum(workerNode->nodeRole);
-   values[Anum_pg_dist_node_nodecluster - 1] = nodeClusterNameDatum;
-
-   pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock);
-
-   /* generate the tuple */
-   tupleDescriptor = RelationGetDescr(pgDistNode);
-   heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
-   nodeDatum = HeapTupleGetDatum(heapTuple);
-
-   heap_close(pgDistNode, NoLock);
-
-   return nodeDatum;
-}
-
-
/*
 * GetNextGroupId allocates and returns a unique groupId for the group
 * to be created. This allocation occurs both in shared memory and in write
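Since ActivateNode and AddNodeMetadata now return the nodeid, callers that still need the full node row can look it up in pg_dist_node after the call, which is the pattern the updated regression tests below adopt. A minimal psql sketch, assuming a worker at localhost:9700 (hypothetical host and port):

    SELECT master_add_node('localhost', 9700) AS new_node \gset
    SELECT nodename, nodeport, isactive FROM pg_dist_node WHERE nodeid = :new_node;
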
@@ -206,9 +206,9 @@ ORDER BY placementid;
-- it does not create any network activity therefore can not
-- be injected failure through network
SELECT master_add_inactive_node('localhost', :worker_2_proxy_port);
- master_add_inactive_node
---------------------------------------------------
- (3,3,localhost,9060,default,f,f,primary,default)
+ master_add_inactive_node
+--------------------------
+ 3
(1 row)

SELECT master_remove_node('localhost', :worker_2_proxy_port);
@@ -292,9 +292,9 @@ SELECT citus.mitmproxy('conn.allow()');

SELECT master_add_node('localhost', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node localhost:9060
- master_add_node
---------------------------------------------------
- (6,6,localhost,9060,default,f,t,primary,default)
+ master_add_node
+-----------------
+ 6
(1 row)

-- verify node is added
@@ -349,9 +349,9 @@ SELECT citus.mitmproxy('conn.allow()');

SELECT master_add_node('localhost', :worker_1_port);
NOTICE: Replicating reference table "user_table" to the node localhost:57637
- master_add_node
----------------------------------------------------
- (8,8,localhost,57637,default,f,t,primary,default)
+ master_add_node
+-----------------
+ 8
(1 row)

-- verify node is added
@@ -6,14 +6,14 @@ SELECT citus.mitmproxy('conn.allow()');

-- add the workers
SELECT master_add_node('localhost', :worker_1_port);
- master_add_node
----------------------------------------------------
- (1,1,localhost,57637,default,f,t,primary,default)
+ master_add_node
+-----------------
+ 1
(1 row)

SELECT master_add_node('localhost', :worker_2_proxy_port); -- an mitmproxy which forwards to the second worker
- master_add_node
---------------------------------------------------
- (2,2,localhost,9060,default,f,t,primary,default)
+ master_add_node
+-----------------
+ 2
(1 row)

@@ -11,11 +11,11 @@ step s1-begin:
    BEGIN;

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-copy-to-reference-table:
    COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
    <waiting ...>
@@ -54,15 +54,15 @@ step s2-copy-to-reference-table:
    COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
    <waiting ...>
step s2-commit:
    COMMIT;

step s1-add-second-worker: <... completed>
-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-print-content:
    SELECT
    nodeport, success, result
@@ -91,11 +91,11 @@ step s1-begin:
    BEGIN;

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-insert-to-reference-table:
    INSERT INTO test_reference_table VALUES (6);
    <waiting ...>
@@ -134,15 +134,15 @@ step s2-insert-to-reference-table:
    INSERT INTO test_reference_table VALUES (6);

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
    <waiting ...>
step s2-commit:
    COMMIT;

step s1-add-second-worker: <... completed>
-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-print-content:
    SELECT
    nodeport, success, result
@@ -171,11 +171,11 @@ step s1-begin:
    BEGIN;

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-ddl-on-reference-table:
    CREATE INDEX reference_index ON test_reference_table(test_id);
    <waiting ...>
@@ -214,15 +214,15 @@ step s2-ddl-on-reference-table:
    CREATE INDEX reference_index ON test_reference_table(test_id);

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
    <waiting ...>
step s2-commit:
    COMMIT;

step s1-add-second-worker: <... completed>
-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-print-index-count:
    SELECT
    nodeport, success, result
@@ -248,11 +248,11 @@ step s1-begin:
    BEGIN;

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-copy-to-reference-table:
    COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
    <waiting ...>
@@ -288,15 +288,15 @@ step s2-copy-to-reference-table:
    COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
    <waiting ...>
step s2-commit:
    COMMIT;

step s1-add-second-worker: <... completed>
-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-print-content:
    SELECT
    nodeport, success, result
@@ -322,11 +322,11 @@ step s1-begin:
    BEGIN;

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-insert-to-reference-table:
    INSERT INTO test_reference_table VALUES (6);
    <waiting ...>
@@ -362,15 +362,15 @@ step s2-insert-to-reference-table:
    INSERT INTO test_reference_table VALUES (6);

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
    <waiting ...>
step s2-commit:
    COMMIT;

step s1-add-second-worker: <... completed>
-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-print-content:
    SELECT
    nodeport, success, result
@@ -396,11 +396,11 @@ step s1-begin:
    BEGIN;

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-ddl-on-reference-table:
    CREATE INDEX reference_index ON test_reference_table(test_id);
    <waiting ...>
@@ -436,15 +436,15 @@ step s2-ddl-on-reference-table:
    CREATE INDEX reference_index ON test_reference_table(test_id);

step s1-add-second-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
    <waiting ...>
step s2-commit:
    COMMIT;

step s1-add-second-worker: <... completed>
-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-print-index-count:
    SELECT
    nodeport, success, result
|
|||
|
||||
starting permutation: s1a
|
||||
step s1a:
|
||||
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57637);
|
||||
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
|
||||
nodename nodeport isactive
|
||||
?column?
|
||||
|
||||
localhost 57637 t
|
||||
nodename nodeport isactive
|
||||
1
|
||||
?column?
|
||||
|
||||
localhost 57638 t
|
||||
1
|
||||
|
|
|
@@ -1,11 +1,11 @@
Parsed test spec with 4 sessions

starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -16,9 +16,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -35,11 +35,11 @@ step s1-begin:
    BEGIN;

step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-public-schema:
    SET search_path TO public;

@@ -79,11 +79,11 @@ master_remove_node


starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -94,9 +94,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -116,11 +116,11 @@ step s2-begin:
    BEGIN;

step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-public-schema:
    SET search_path TO public;

@@ -163,11 +163,11 @@ master_remove_node


starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -178,9 +178,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -212,15 +212,15 @@ create_distributed_table


step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
    <waiting ...>
step s2-commit:
    COMMIT;

step s1-add-worker: <... completed>
-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s1-commit:
    COMMIT;

@@ -247,11 +247,11 @@ master_remove_node


starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -262,9 +262,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -281,11 +281,11 @@ step s1-begin:
    BEGIN;

step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
@@ -327,11 +327,11 @@ master_remove_node


starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -342,9 +342,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -364,11 +364,11 @@ step s2-begin:
    BEGIN;

step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
@@ -413,11 +413,11 @@ master_remove_node


starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -428,9 +428,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -463,15 +463,15 @@ create_distributed_table


step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
    <waiting ...>
step s2-commit:
    COMMIT;

step s1-add-worker: <... completed>
-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s1-commit:
    COMMIT;

@@ -499,11 +499,11 @@ master_remove_node


starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s3-begin s1-add-worker s2-create-table s3-use-schema s3-create-table s1-commit s2-commit s3-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -514,9 +514,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -543,11 +543,11 @@ step s3-begin:
    BEGIN;

step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
@@ -604,11 +604,11 @@ master_remove_node


starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s3-begin s4-begin s1-add-worker s2-create-table s3-use-schema s3-create-table s4-use-schema s4-create-table s1-commit s2-commit s3-commit s4-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -619,9 +619,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -651,11 +651,11 @@ step s4-begin:
    BEGIN;

step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
@@ -728,11 +728,11 @@ master_remove_node


starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -743,9 +743,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -759,11 +759,11 @@ master_remove_node


step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
@@ -826,11 +826,11 @@ master_remove_node


starting permutation: s1-print-distributed-objects s1-begin s2-begin s4-begin s1-add-worker s2-create-schema s4-create-schema2 s2-create-table s4-create-table s1-commit s2-commit s4-commit s2-print-distributed-objects
-nodename nodeport isactive
+?column?

-localhost 57637 t
+1
step s1-print-distributed-objects:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -841,9 +841,9 @@ step s1-print-distributed-objects:

    SELECT master_remove_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
pg_identify_object_as_address

count
@@ -866,11 +866,11 @@ step s4-begin:
    BEGIN;

step s1-add-worker:
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

-nodename nodeport isactive
+?column?

-localhost 57638 t
+1
step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;

@@ -28,10 +28,10 @@ SELECT master_get_active_worker_nodes();
(2 rows)

-- try to add a node that is already in the cluster
-SELECT nodeid, groupid FROM master_add_node('localhost', :worker_1_port);
- nodeid | groupid
---------+---------
- 1 | 1
+SELECT * FROM master_add_node('localhost', :worker_1_port);
+ master_add_node
+-----------------
+ 1
(1 row)

-- get the active nodes
@@ -78,10 +78,10 @@ SELECT master_get_active_worker_nodes();
-- add some shard placements to the cluster
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
-SELECT isactive FROM master_activate_node('localhost', :worker_2_port);
- isactive
-----------
- t
+SELECT * FROM master_activate_node('localhost', :worker_2_port);
+ master_activate_node
+----------------------
+ 3
(1 row)

CREATE TABLE cluster_management_test (col_1 text, col_2 int);
@@ -229,10 +229,10 @@ SELECT master_get_active_worker_nodes();
(1 row)

-- restore the node for next tests
-SELECT isactive FROM master_activate_node('localhost', :worker_2_port);
- isactive
-----------
- t
+SELECT * FROM master_activate_node('localhost', :worker_2_port);
+ master_activate_node
+----------------------
+ 3
(1 row)

-- try to remove a node with active placements and see that node removal is failed
@@ -281,7 +281,8 @@ DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port;
SELECT * FROM cluster_management_test;
ERROR: there is a shard placement in node group 3 but there are no nodes in that group
-- clean-up
-SELECT groupid as new_group FROM master_add_node('localhost', :worker_2_port) \gset
+SELECT master_add_node('localhost', :worker_2_port) AS new_node \gset
+SELECT groupid AS new_group FROM pg_dist_node WHERE nodeid = :new_node \gset
UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group;
-- test that you are allowed to remove secondary nodes even if there are placements
SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary');
@@ -369,9 +370,9 @@ SELECT count(1) FROM pg_dist_node;
SELECT
    master_add_node('localhost', :worker_1_port),
    master_add_node('localhost', :worker_2_port);
- master_add_node | master_add_node
-----------------------------------------------------+-----------------------------------------------------
- (11,9,localhost,57637,default,f,t,primary,default) | (12,10,localhost,57638,default,f,t,primary,default)
+ master_add_node | master_add_node
+-----------------+-----------------
+ 11 | 12
(1 row)

SELECT * FROM pg_dist_node ORDER BY nodeid;
@@ -556,15 +557,15 @@ SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_g

-- check that you can add a seconary to a non-default cluster, and activate it, and remove it
SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary');
- master_add_inactive_node
---------------------------------------------------
- (22,16,localhost,9999,default,f,f,secondary,olap)
+ master_add_inactive_node
+--------------------------
+ 22
(1 row)

SELECT master_activate_node('localhost', 9999);
- master_activate_node
---------------------------------------------------
- (22,16,localhost,9999,default,f,t,secondary,olap)
+ master_activate_node
+----------------------
+ 22
(1 row)

SELECT master_disable_node('localhost', 9999);
@@ -600,9 +601,9 @@ DETAIL: Failing row contains (16, 14, localhost, 57637, default, f, t, primary,
-- check that you /can/ add a secondary node to a non-default cluster
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap');
- master_add_node
---------------------------------------------------
- (23,14,localhost,8888,default,f,t,secondary,olap)
+ master_add_node
+-----------------
+ 23
(1 row)

-- check that super-long cluster names are truncated
@@ -613,9 +614,9 @@ SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole =
    'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.'
    'overflow'
);
- master_add_node
---------------------------------------------------------------------------------------------------------------
- (24,14,localhost,8887,default,f,t,secondary,thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.)
+ master_add_node
+-----------------
+ 24
(1 row)

SELECT * FROM pg_dist_node WHERE nodeport=8887;
@@ -628,23 +629,23 @@ SELECT * FROM pg_dist_node WHERE nodeport=8887;
-- them in any of the remaining tests
-- master_add_secondary_node lets you skip looking up the groupid
SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port);
- master_add_secondary_node
-------------------------------------------------------
- (25,14,localhost,9995,default,f,t,secondary,default)
+ master_add_secondary_node
+---------------------------
+ 25
(1 row)

SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port);
- master_add_secondary_node
-------------------------------------------------------
- (26,16,localhost,9994,default,f,t,secondary,default)
+ master_add_secondary_node
+---------------------------
+ 26
(1 row)

SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000);
ERROR: node at "localhost:2000" does not exist
SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
- master_add_secondary_node
--------------------------------------------------------------
- (27,14,localhost,9992,default,f,t,secondary,second-cluster)
+ master_add_secondary_node
+---------------------------
+ 27
(1 row)

SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset

@@ -175,9 +175,9 @@ SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
-- Ensure it works when run on a secondary node
SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary');
- master_add_node
----------------------------------------------------
- (4,1,localhost,8888,default,f,t,secondary,default)
+ master_add_node
+-----------------
+ 4
(1 row)

SELECT start_metadata_sync_to_node('localhost', 8888);
@@ -206,9 +206,9 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;

-- Add a node to another cluster to make sure it's also synced
SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
- master_add_secondary_node
------------------------------------------------------------
- (5,1,localhost,8889,default,f,t,secondary,second-cluster)
+ master_add_secondary_node
+---------------------------
+ 5
(1 row)

-- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker
@@ -1166,9 +1166,9 @@ SELECT create_distributed_table('mx_table', 'a');

\c - postgres - :master_port
SELECT master_add_node('localhost', :worker_2_port);
- master_add_node
---------------------------------------------------
- (6,4,localhost,57638,default,f,t,primary,default)
+ master_add_node
+-----------------
+ 6
(1 row)

SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
@@ -1393,9 +1393,9 @@ WHERE logicalrelid='mx_ref'::regclass;
\c - - - :master_port
SELECT master_add_node('localhost', :worker_2_port);
NOTICE: Replicating reference table "mx_ref" to the node localhost:57638
- master_add_node
---------------------------------------------------
- (7,5,localhost,57638,default,f,t,primary,default)
+ master_add_node
+-----------------
+ 7
(1 row)

SELECT shardid, nodename, nodeport

@@ -41,12 +41,13 @@ SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
(1 row)

-- re-add the node for next tests
-SELECT groupid AS worker_2_group FROM master_add_node('localhost', :worker_2_port) \gset
+SELECT master_add_node('localhost', :worker_2_port) AS worker_2_nodeid \gset
+SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeid=:worker_2_nodeid \gset
-- add a secondary to check we don't attempt to replicate the table to it
-SELECT isactive FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary');
- isactive
+SELECT 1 FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary');
+ ?column?
----------
- t
+ 1
(1 row)

-- remove a node with reference table
@@ -58,10 +59,10 @@ SELECT create_reference_table('remove_node_reference_table');
(1 row)

-- make sure when we add a secondary we don't attempt to add placements to it
-SELECT isactive FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary');
- isactive
+SELECT 1 FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary');
+ ?column?
----------
- t
+ 1
(1 row)

SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group;

@@ -654,11 +654,11 @@ ORDER BY

\c - - - :master_port
-- try using the coordinator as a worker and then dropping the table
-SELECT master_add_node('localhost', :master_port);
+SELECT 1 FROM master_add_node('localhost', :master_port);
NOTICE: Replicating reference table "transactional_drop_reference" to the node localhost:57636
- master_add_node
----------------------------------------------------------------
- (1380010,1380008,localhost,57636,default,f,t,primary,default)
+ ?column?
+----------
+ 1
(1 row)

CREATE TABLE citus_local (id serial, k int);

@@ -2,7 +2,7 @@
# add single one of the nodes for the purpose of the test
setup
{
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57637);
+   SELECT 1 FROM master_add_node('localhost', 57637);

    CREATE TABLE test_reference_table (test_id integer);
    SELECT create_reference_table('test_reference_table');
@@ -24,7 +24,7 @@ step "s1-begin"

step "s1-add-second-worker"
{
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
}

step "s1-remove-second-worker"

@@ -1,8 +1,8 @@
session "s1"
step "s1a"
{
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57637);
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57637);
+   SELECT 1 FROM master_add_node('localhost', 57638);
}

permutation "s1a"

@@ -3,7 +3,7 @@
setup
{
    SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57637);
+   SELECT 1 FROM master_add_node('localhost', 57637);
}

# ensure that both nodes exists for the remaining of the isolation tests
@@ -32,7 +32,7 @@ step "s1-begin"

step "s1-add-worker"
{
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);
}

step "s1-commit"
@@ -44,7 +44,7 @@ step "s1-commit"
# on that node as well. After counting objects is done we remove the node again.
step "s1-print-distributed-objects"
{
-   SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
+   SELECT 1 FROM master_add_node('localhost', 57638);

    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;

@@ -15,7 +15,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT master_get_active_worker_nodes();

-- try to add a node that is already in the cluster
-SELECT nodeid, groupid FROM master_add_node('localhost', :worker_1_port);
+SELECT * FROM master_add_node('localhost', :worker_1_port);

-- get the active nodes
SELECT master_get_active_worker_nodes();
@@ -35,7 +35,7 @@ SELECT master_get_active_worker_nodes();
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;

-SELECT isactive FROM master_activate_node('localhost', :worker_2_port);
+SELECT * FROM master_activate_node('localhost', :worker_2_port);
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');

@@ -95,7 +95,7 @@ ABORT;
SELECT master_get_active_worker_nodes();

-- restore the node for next tests
-SELECT isactive FROM master_activate_node('localhost', :worker_2_port);
+SELECT * FROM master_activate_node('localhost', :worker_2_port);

-- try to remove a node with active placements and see that node removal is failed
SELECT master_remove_node('localhost', :worker_2_port);
@@ -122,7 +122,8 @@ DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port;
SELECT * FROM cluster_management_test;

-- clean-up
-SELECT groupid as new_group FROM master_add_node('localhost', :worker_2_port) \gset
+SELECT master_add_node('localhost', :worker_2_port) AS new_node \gset
+SELECT groupid AS new_group FROM pg_dist_node WHERE nodeid = :new_node \gset
UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group;

-- test that you are allowed to remove secondary nodes even if there are placements

@@ -31,16 +31,17 @@ SELECT master_remove_node('localhost', :worker_2_port);
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

-- re-add the node for next tests
-SELECT groupid AS worker_2_group FROM master_add_node('localhost', :worker_2_port) \gset
+SELECT master_add_node('localhost', :worker_2_port) AS worker_2_nodeid \gset
+SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeid=:worker_2_nodeid \gset
-- add a secondary to check we don't attempt to replicate the table to it
-SELECT isactive FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary');
+SELECT 1 FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary');

-- remove a node with reference table
CREATE TABLE remove_node_reference_table(column1 int);
SELECT create_reference_table('remove_node_reference_table');

-- make sure when we add a secondary we don't attempt to add placements to it
-SELECT isactive FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary');
+SELECT 1 FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary');
SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group;
-- make sure when we disable a secondary we don't remove any placements
SELECT master_disable_node('localhost', 9001);

@@ -364,7 +364,7 @@ ORDER BY
\c - - - :master_port

-- try using the coordinator as a worker and then dropping the table
-SELECT master_add_node('localhost', :master_port);
+SELECT 1 FROM master_add_node('localhost', :master_port);
CREATE TABLE citus_local (id serial, k int);
SELECT create_distributed_table('citus_local', 'id');
INSERT INTO citus_local (k) VALUES (2);