mirror of https://github.com/citusdata/citus.git
Replace master with citus in logs and comments (#5210)
I replaced
- master_add_node,
- master_add_inactive_node,
- master_activate_node

with

- citus_add_node,
- citus_add_inactive_node,
- citus_activate_node

respectively.

pull/5197/head^2
parent 51fa7a2208
commit 7e39c7ea83
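
Only comments, hints, and log messages change here; the citus_*-prefixed UDFs themselves already exist (Citus 10 renamed the node-management functions from master_* to citus_*), and the signatures are unchanged. A minimal sketch of the spellings the updated text now points at, with placeholder hostnames and ports:

    -- previously hinted as: SELECT master_add_node('10.0.0.1', 5432);
    SELECT citus_add_node('10.0.0.1', 5432);

    -- previously hinted as: SELECT master_add_inactive_node('10.0.0.2', 5432);
    SELECT citus_add_inactive_node('10.0.0.2', 5432);

    -- previously hinted as: SELECT master_activate_node('10.0.0.2', 5432);
    SELECT citus_activate_node('10.0.0.2', 5432);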
@@ -81,9 +81,9 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
 	/*
 	 * Make sure that no new nodes are added after this point until the end of the
 	 * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-	 * ExclusiveLock taken by master_add_node.
+	 * ExclusiveLock taken by citus_add_node.
 	 * This guarantees that all active nodes will have the object, because they will
-	 * either get it now, or get it in master_add_node after this transaction finishes and
+	 * either get it now, or get it in citus_add_node after this transaction finishes and
 	 * the pg_dist_object record becomes visible.
 	 */
 	List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(RowShareLock);

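The locking argument in this comment can be checked from two psql sessions; a minimal sketch, with a placeholder worker port:

    -- Session 1: take the RowShareLock the dependency-propagation code takes
    BEGIN;
    LOCK TABLE pg_dist_node IN ROW SHARE MODE;

    -- Session 2: blocks until session 1 commits, because citus_add_node
    -- acquires an ExclusiveLock on pg_dist_node, and EXCLUSIVE conflicts
    -- with ROW SHARE
    SELECT citus_add_node('localhost', 9702);

    -- Session 1: committing releases the lock and unblocks the node addition
    COMMIT;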
@@ -152,9 +152,9 @@ PostprocessCreateExtensionStmt(Node *node, const char *queryString)
 	/*
 	 * Make sure that no new nodes are added after this point until the end of the
 	 * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-	 * ExclusiveLock taken by master_add_node.
+	 * ExclusiveLock taken by citus_add_node.
 	 * This guarantees that all active nodes will have the extension, because they will
-	 * either get it now, or get it in master_add_node after this transaction finishes and
+	 * either get it now, or get it in citus_add_node after this transaction finishes and
 	 * the pg_dist_object record becomes visible.
 	 */
 	LockRelationOid(DistNodeRelationId(), RowShareLock);

@@ -265,9 +265,9 @@ PreprocessDropExtensionStmt(Node *node, const char *queryString,
 	/*
 	 * Make sure that no new nodes are added after this point until the end of the
 	 * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-	 * ExclusiveLock taken by master_add_node.
+	 * ExclusiveLock taken by citus_add_node.
 	 * This guarantees that all active nodes will drop the extension, because they will
-	 * either get it now, or get it in master_add_node after this transaction finishes and
+	 * either get it now, or get it in citus_add_node after this transaction finishes and
 	 * the pg_dist_object record becomes visible.
 	 */
 	LockRelationOid(DistNodeRelationId(), RowShareLock);

@@ -401,7 +401,7 @@ PreprocessAlterExtensionSchemaStmt(Node *node, const char *queryString,
 	/*
 	 * Make sure that no new nodes are added after this point until the end of the
 	 * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-	 * ExclusiveLock taken by master_add_node.
+	 * ExclusiveLock taken by citus_add_node.
 	 * This guarantees that all active nodes will update the extension schema after
 	 * this transaction finishes and the pg_dist_object record becomes visible.
 	 */

@@ -469,9 +469,9 @@ PreprocessAlterExtensionUpdateStmt(Node *node, const char *queryString,
 	/*
 	 * Make sure that no new nodes are added after this point until the end of the
 	 * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-	 * ExclusiveLock taken by master_add_node.
+	 * ExclusiveLock taken by citus_add_node.
 	 * This guarantees that all active nodes will update the extension version, because
-	 * they will either get it now, or get it in master_add_node after this transaction
+	 * they will either get it now, or get it in citus_add_node after this transaction
 	 * finishes and the pg_dist_object record becomes visible.
 	 */
 	LockRelationOid(DistNodeRelationId(), RowShareLock);

@@ -143,7 +143,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)
 	/*
 	 * Make sure that no new nodes are added after this point until the end of the
 	 * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-	 * ExclusiveLock taken by master_add_node.
+	 * ExclusiveLock taken by citus_add_node.
 	 */
 	LockRelationOid(DistNodeRelationId(), RowShareLock);

@@ -133,9 +133,9 @@ PreprocessCompositeTypeStmt(Node *node, const char *queryString,
 	/*
 	 * Make sure that no new nodes are added after this point until the end of the
 	 * transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
-	 * ExclusiveLock taken by master_add_node.
+	 * ExclusiveLock taken by citus_add_node.
 	 * This guarantees that all active nodes will have the object, because they will
-	 * either get it now, or get it in master_add_node after this transaction finishes and
+	 * either get it now, or get it in citus_add_node after this transaction finishes and
 	 * the pg_dist_object record becomes visible.
 	 */
 	LockRelationOid(DistNodeRelationId(), RowShareLock);

@@ -182,7 +182,7 @@ StartMetadataSyncToNode(const char *nodeNameString, int32 nodePort)
 	{
 		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 						errmsg("you cannot sync metadata to a non-existent node"),
-						errhint("First, add the node with SELECT master_add_node"
+						errhint("First, add the node with SELECT citus_add_node"
 								"(%s,%d)", escapedNodeName, nodePort)));
 	}

@@ -191,7 +191,7 @@ StartMetadataSyncToNode(const char *nodeNameString, int32 nodePort)
 		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 						errmsg("you cannot sync metadata to an inactive node"),
 						errhint("First, activate the node with "
-								"SELECT master_activate_node(%s,%d)",
+								"SELECT citus_activate_node(%s,%d)",
 								escapedNodeName, nodePort)));
 	}

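Read together, the two updated hints trace the expected call order before a metadata sync; roughly, with a placeholder port:

    -- An added-but-inactive node fails the sync with "you cannot sync
    -- metadata to an inactive node" plus the new citus_activate_node hint
    SELECT citus_add_inactive_node('localhost', 9702);
    SELECT start_metadata_sync_to_node('localhost', 9702);  -- ERROR + HINT

    -- Following the hint makes the sync succeed
    SELECT citus_activate_node('localhost', 9702);
    SELECT start_metadata_sync_to_node('localhost', 9702);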
@@ -380,7 +380,7 @@ master_add_secondary_node(PG_FUNCTION_ARGS)
  * node should not have any active placements.
  * This function also deletes all reference table placements belong to the given node from
  * pg_dist_placement, but it does not drop actual placement at the node. In the case of
- * re-adding the node, master_add_node first drops and re-creates the reference tables.
+ * re-adding the node, citus_add_node first drops and re-creates the reference tables.
  */
 Datum
 citus_remove_node(PG_FUNCTION_ARGS)

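As the comment above citus_remove_node notes, removal only deletes placement metadata; a sketch of the remove/re-add cycle it describes, with placeholder host and port:

    -- Deletes the node's reference table placements from pg_dist_placement;
    -- shards physically present on the worker are left untouched
    SELECT citus_remove_node('localhost', 9700);

    -- Re-adding first drops and re-creates the reference tables on the node
    SELECT citus_add_node('localhost', 9700);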
@@ -446,7 +446,7 @@ citus_disable_node(PG_FUNCTION_ARGS)
 		ereport(NOTICE, (errmsg(
 							 "Node %s:%d has active shard placements. Some queries "
 							 "may fail after this operation. Use "
-							 "SELECT master_activate_node('%s', %d) to activate this "
+							 "SELECT citus_activate_node('%s', %d) to activate this "
 							 "node back.",
 							 workerNode->workerName, nodePort,
 							 workerNode->workerName,

@@ -130,7 +130,7 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode)
 		uint64 shardId = shardInterval->shardId;

 		/*
-		 * We only take an access share lock, otherwise we'll hold up master_add_node.
+		 * We only take an access share lock, otherwise we'll hold up citus_add_node.
 		 * In case of create_reference_table() where we don't want concurrent writes
 		 * to pg_dist_node, we have already acquired ShareLock on pg_dist_node.
 		 */

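The weaker lock is sufficient because of PostgreSQL's lock-conflict matrix: ACCESS SHARE conflicts only with ACCESS EXCLUSIVE, so unlike the ROW SHARE lock taken in the propagation paths above, it never blocks the ExclusiveLock that citus_add_node takes. Illustratively, with a placeholder port:

    -- A plain SELECT holds only AccessShareLock on pg_dist_node
    BEGIN;
    SELECT count(*) FROM pg_dist_node;

    -- In another session this proceeds without waiting, since EXCLUSIVE
    -- does not conflict with ACCESS SHARE
    SELECT citus_add_node('localhost', 9702);
    COMMIT;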
@@ -15,7 +15,7 @@
  * compiler constants for pg_dist_node
  * ----------------
  *
- * n.b. master_add_node, master_add_inactive_node, and master_activate_node all
+ * n.b. citus_add_node, citus_add_inactive_node, and citus_activate_node all
  * directly return pg_dist_node tuples. This means their definitions (and
  * in particular their OUT parameters) must be changed whenever the definition of
  * pg_dist_node changes.

@@ -54,7 +54,7 @@ ORDER BY placementid;
 (2 rows)

 SELECT master_disable_node('localhost', :worker_2_proxy_port);
-NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back.
+NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 9060) to activate this node back.
  master_disable_node
 ---------------------------------------------------------------------

@@ -143,7 +143,7 @@ HINT: To proceed, either drop the distributed tables or use undistribute_table(
 -- try to disable a node with active placements see that node is removed
 -- observe that a notification is displayed
 SELECT master_disable_node('localhost', :worker_2_port);
-NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
+NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
  master_disable_node
 ---------------------------------------------------------------------

@@ -170,7 +170,7 @@ NOTICE: Replicating reference table "test_reference_table" to the node localhos

 DROP TABLE test_reference_table;
 SELECT master_disable_node('localhost', :worker_2_port);
-NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
+NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
  master_disable_node
 ---------------------------------------------------------------------

@@ -369,7 +369,7 @@ SELECT hasmetadata, metadatasynced, shouldhaveshards FROM pg_dist_node WHERE nod
 \c - - - :master_port
 -- verify that mx workers are updated when disabling/activating nodes
 SELECT citus_disable_node('localhost', :worker_1_port);
-NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57637) to activate this node back.
+NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back.
  citus_disable_node
 ---------------------------------------------------------------------

@@ -650,7 +650,7 @@ SELECT shardid, nodename, nodeport

 -- disable the first node
 SELECT master_disable_node('localhost', :worker_1_port);
-NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57637) to activate this node back.
+NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back.
  master_disable_node
 ---------------------------------------------------------------------