mirror of https://github.com/citusdata/citus.git
Merge pull request #5486 from citusdata/disable_node_async
Allow disabling node(s) when multiple failures happen (pull/5408/head^2)
commit ab365a335d

@@ -1093,7 +1093,7 @@ EnsureSequentialModeForFunctionDDL(void)
 * and triggers the metadata sync if the node does not have the metadata. Later,
 * the maintenance daemon will sync the metadata to the nodes.
 */
static void
void
TriggerSyncMetadataToPrimaryNodes(void)
{
List *workerList = ActivePrimaryNonCoordinatorNodeList(ShareLock);
@@ -428,64 +428,98 @@ citus_disable_node(PG_FUNCTION_ARGS)
{
text *nodeNameText = PG_GETARG_TEXT_P(0);
int32 nodePort = PG_GETARG_INT32(1);
bool forceDisableNode = PG_GETARG_BOOL(2);

char *nodeName = text_to_cstring(nodeNameText);
WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort);

/* there is no concept of invalid coordinator */
bool isActive = false;
bool onlyConsiderActivePlacements = false;
MemoryContext savedContext = CurrentMemoryContext;
ErrorIfCoordinatorMetadataSetFalse(workerNode, BoolGetDatum(isActive),
"isactive");

PG_TRY();
WorkerNode *firstWorkerNode = GetFirstPrimaryWorkerNode();
if (!forceDisableNode && firstWorkerNode &&
firstWorkerNode->nodeId == workerNode->nodeId)
{
if (NodeIsPrimary(workerNode))
{
/*
 * Delete reference table placements so they are not taken into account
 * for the check if there are placements after this.
 */
DeleteAllReferenceTablePlacementsFromNodeGroup(workerNode->groupId);

if (NodeGroupHasShardPlacements(workerNode->groupId,
onlyConsiderActivePlacements))
{
ereport(NOTICE, (errmsg(
"Node %s:%d has active shard placements. Some queries "
"may fail after this operation. Use "
"SELECT citus_activate_node('%s', %d) to activate this "
"node back.",
workerNode->workerName, nodePort,
workerNode->workerName,
nodePort)));
}
}

SetNodeState(nodeName, nodePort, isActive);
TransactionModifiedNodeMetadata = true;
/*
 * We sync metadata asynchronously, and optionally in the background worker,
 * which would mean that some nodes might get the updates while others do
 * not. And, if the node metadata that is changing is the first
 * worker node, the problem gets nasty. We serialize modifications
 * to replicated tables by acquiring locks on the first worker node.
 *
 * If some nodes get the metadata changes and some do not, they'd be
 * acquiring the locks on different nodes. Hence, having the
 * possibility of diverged shard placements for the same shard.
 *
 * To prevent that, we currently do not allow disabling the first
 * worker node.
 */
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("disabling the first worker node in the "
"metadata is not allowed"),
errhint("You can force disabling node, but this operation "
"might cause replicated shards to diverge: SELECT "
"citus_disable_node('%s', %d, force:=true);",
workerNode->workerName,
nodePort)));
}
PG_CATCH();

/*
 * First, locally mark the node as inactive. We'll later trigger the background
 * worker to sync the metadata changes to the relevant nodes.
 */
workerNode =
SetWorkerColumnLocalOnly(workerNode,
Anum_pg_dist_node_isactive,
BoolGetDatum(isActive));
if (NodeIsPrimary(workerNode))
{
/* CopyErrorData() requires (CurrentMemoryContext != ErrorContext) */
MemoryContextSwitchTo(savedContext);
ErrorData *edata = CopyErrorData();
/*
 * We do not allow disabling a node if it contains any
 * primary placement that is the "only" active placement
 * for any given shard.
 */
ErrorIfNodeContainsNonRemovablePlacements(workerNode);

if (ClusterHasKnownMetadataWorkers())
bool onlyConsiderActivePlacements = false;
if (NodeGroupHasShardPlacements(workerNode->groupId,
onlyConsiderActivePlacements))
{
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("Disabling %s:%d failed", workerNode->workerName,
nodePort),
errdetail("%s", edata->message),
errhint(
"If you are using MX, try stop_metadata_sync_to_node(hostname, port) "
"for nodes that are down before disabling them.")));
}
else
{
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("Disabling %s:%d failed", workerNode->workerName,
nodePort),
errdetail("%s", edata->message)));
ereport(NOTICE, (errmsg(
"Node %s:%d has active shard placements. Some queries "
"may fail after this operation. Use "
"SELECT citus_activate_node('%s', %d) to activate this "
"node back.",
workerNode->workerName, nodePort,
workerNode->workerName,
nodePort)));
}

/*
 * Delete replicated table placements from the coordinator's metadata,
 * but not remotely. That is because one or more of the remote
 * nodes might be down. Instead, we let the background worker
 * sync the metadata when possible.
 */
bool forceRemoteDelete = false;
DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
forceRemoteDelete);
}

TransactionModifiedNodeMetadata = true;

/*
 * We have not propagated the metadata changes yet, make sure that all the
 * active nodes get the metadata updates. We defer this operation to the
 * background worker to make it possible to disable nodes when multiple
 * nodes are down.
 */
if (UnsetMetadataSyncedForAll())
{
TriggerMetadataSyncOnCommit();
}
PG_END_TRY();

PG_RETURN_VOID();
}

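For illustration only (not part of the diff), a minimal psql sketch of the new flow, assuming a worker at localhost:9700 and the wait_until_metadata_sync() helper defined in the regression tests further down:

    -- disabling works even while other nodes are down; metadata is synced later
    SELECT citus_disable_node('localhost', 9700);
    -- the first worker node can only be disabled with the force flag
    SELECT citus_disable_node('localhost', 9700, force := true);
    -- wait for the background worker to propagate the pg_dist_node change
    SELECT public.wait_until_metadata_sync();
    -- re-activate the node later
    SELECT citus_activate_node('localhost', 9700);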
@@ -836,15 +870,43 @@ ActivateNode(char *nodeName, int nodePort)
/* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */
LockRelationOid(DistNodeRelationId(), ExclusiveLock);

WorkerNode *newWorkerNode = SetNodeState(nodeName, nodePort, isActive);
/*
 * First, locally mark the node as active; if everything goes well,
 * we are going to sync this information to all the metadata nodes.
 */
WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort);
if (workerNode == NULL)
{
ereport(ERROR, (errmsg("node at \"%s:%u\" does not exist", nodeName, nodePort)));
}

SetUpDistributedTableDependencies(newWorkerNode);
workerNode =
SetWorkerColumnLocalOnly(workerNode, Anum_pg_dist_node_isactive,
BoolGetDatum(isActive));
bool syncMetadata =
EnableMetadataSyncByDefault && NodeIsPrimary(workerNode);

if (EnableMetadataSyncByDefault && NodeIsPrimary(newWorkerNode))
if (syncMetadata)
{
/*
 * We are going to sync the metadata anyway in this transaction, so do
 * not fail just because the current metadata is not synced.
 */
SetWorkerColumn(workerNode, Anum_pg_dist_node_metadatasynced,
BoolGetDatum(isActive));
}

SetUpDistributedTableDependencies(workerNode);

if (syncMetadata)
{
StartMetadataSyncToNode(nodeName, nodePort);
}

/* finally, let all other active metadata nodes learn about this change */
WorkerNode *newWorkerNode = SetNodeState(nodeName, nodePort, isActive);
Assert(newWorkerNode->nodeId == workerNode->nodeId);

return newWorkerNode->nodeId;
}

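As a rough illustration (a sketch, not from the diff) of the node state this function maintains, the relevant pg_dist_node columns can be inspected with:

    -- isactive is flipped locally first; metadatasynced is updated when sync is enabled
    SELECT nodeid, nodename, nodeport, isactive, metadatasynced
    FROM pg_dist_node
    ORDER BY nodeid;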
@@ -1303,7 +1365,9 @@ RemoveNodeFromCluster(char *nodeName, int32 nodePort)
 * Delete reference table placements so they are not taken into account
 * for the check if there are placements after this.
 */
DeleteAllReferenceTablePlacementsFromNodeGroup(workerNode->groupId);
bool forceRemoteDelete = true;
DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
forceRemoteDelete);

/*
 * Secondary nodes are read-only, never 2PC is used.
@@ -1362,7 +1426,7 @@ ErrorIfNodeContainsNonRemovablePlacements(WorkerNode *workerNode)

/*
 * PlacementHasActivePlacementOnAnotherGroup returns true if there is at least
 * one more healthy placement of the input sourcePlacement on another group.
 * one more active placement of the input sourcePlacement on another group.
 */
static bool
PlacementHasActivePlacementOnAnotherGroup(GroupShardPlacement *sourcePlacement)

@@ -1370,18 +1434,18 @@ PlacementHasActivePlacementOnAnotherGroup(GroupShardPlacement *sourcePlacement)
uint64 shardId = sourcePlacement->shardId;
List *activePlacementList = ActiveShardPlacementList(shardId);

bool foundHealtyPlacementOnAnotherGroup = false;
bool foundActivePlacementOnAnotherGroup = false;
ShardPlacement *activePlacement = NULL;
foreach_ptr(activePlacement, activePlacementList)
{
if (activePlacement->groupId != sourcePlacement->groupId)
{
foundHealtyPlacementOnAnotherGroup = true;
foundActivePlacementOnAnotherGroup = true;
break;
}
}

return foundHealtyPlacementOnAnotherGroup;
return foundActivePlacementOnAnotherGroup;
}

@@ -1,6 +1,7 @@
-- citus--10.2-4--11.0-1

-- bump version to 11.0-1
#include "udfs/citus_disable_node/11.0-1.sql"

DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text);
DROP FUNCTION pg_catalog.master_get_table_metadata(text);
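For context, a sketch (not part of the diff) of how this migration script gets applied: the standard extension upgrade runs citus--10.2-4--11.0-1.sql, which in turn includes udfs/citus_disable_node/11.0-1.sql.

    ALTER EXTENSION citus UPDATE TO '11.0-1';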
@@ -31,3 +31,11 @@ COMMENT ON FUNCTION master_append_table_to_shard(bigint, text, text, integer)

GRANT ALL ON FUNCTION start_metadata_sync_to_node(text, integer) TO PUBLIC;
GRANT ALL ON FUNCTION stop_metadata_sync_to_node(text, integer,bool) TO PUBLIC;

DROP FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool);
CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_disable_node$$;
COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
IS 'removes node from the cluster temporarily';
@@ -0,0 +1,9 @@
DROP FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer);
CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool default false)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_disable_node$$;
COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool)
IS 'removes node from the cluster temporarily';

REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int, bool) FROM PUBLIC;
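Because force defaults to false, both call forms below resolve to the same C entry point; a brief sketch with an illustrative worker address (the REVOKE above keeps the function superuser-only unless re-granted):

    SELECT citus_disable_node('localhost', 9700);        -- force = false
    SELECT citus_disable_node('localhost', 9700, true);  -- force = true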
@@ -1,8 +1,9 @@
CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
DROP FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer);
CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool default false)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_disable_node$$;
COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool)
IS 'removes node from the cluster temporarily';

REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int) FROM PUBLIC;
REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int, bool) FROM PUBLIC;

@@ -48,6 +48,7 @@ static StringInfo CopyShardPlacementToWorkerNodeQuery(
static void ReplicateShardToNode(ShardInterval *shardInterval, char *nodeName,
int nodePort);
static bool AnyRelationsModifiedInTransaction(List *relationIdList);
static List * ReplicatedMetadataSyncedDistributedTableList(void);

/* exports for SQL callable functions */
PG_FUNCTION_INFO_V1(upgrade_to_reference_table);
@@ -426,49 +427,87 @@ CreateReferenceTableColocationId()


/*
 * DeleteAllReferenceTablePlacementsFromNodeGroup function iterates over list of reference
 * tables and deletes all reference table placements from pg_dist_placement table
 * for given group.
 * DeleteAllReplicatedTablePlacementsFromNodeGroup function iterates over
 * list of reference and replicated hash distributed tables and deletes
 * all placements from pg_dist_placement table for given group.
 */
void
DeleteAllReferenceTablePlacementsFromNodeGroup(int32 groupId)
DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool forceRemoteDelete)
{
List *referenceTableList = CitusTableTypeIdList(REFERENCE_TABLE);
List *replicatedMetadataSyncedDistributedTableList =
ReplicatedMetadataSyncedDistributedTableList();

List *replicatedTableList =
list_concat(referenceTableList, replicatedMetadataSyncedDistributedTableList);

/* if there are no reference tables, we do not need to do anything */
if (list_length(referenceTableList) == 0)
if (list_length(replicatedTableList) == 0)
{
return;
}

StringInfo deletePlacementCommand = makeStringInfo();
Oid referenceTableId = InvalidOid;
foreach_oid(referenceTableId, referenceTableList)
Oid replicatedTableId = InvalidOid;
foreach_oid(replicatedTableId, replicatedTableList)
{
List *placements = GroupShardPlacementsForTableOnGroup(referenceTableId,
groupId);
List *placements =
GroupShardPlacementsForTableOnGroup(replicatedTableId, groupId);
if (list_length(placements) == 0)
{
/* this happens if the node was previously disabled */
/*
 * This happens either when the node was previously disabled or when the
 * table doesn't have a placement on this node.
 */
continue;
}

GroupShardPlacement *placement = (GroupShardPlacement *) linitial(placements);
GroupShardPlacement *placement = NULL;
foreach_ptr(placement, placements)
{
LockShardDistributionMetadata(placement->shardId, ExclusiveLock);

LockShardDistributionMetadata(placement->shardId, ExclusiveLock);
DeleteShardPlacementRow(placement->placementId);

DeleteShardPlacementRow(placement->placementId);
if (forceRemoteDelete)
{
resetStringInfo(deletePlacementCommand);
appendStringInfo(deletePlacementCommand,
"DELETE FROM pg_catalog.pg_dist_placement "
"WHERE placementid = " UINT64_FORMAT,
placement->placementId);

resetStringInfo(deletePlacementCommand);
appendStringInfo(deletePlacementCommand,
"DELETE FROM pg_dist_placement WHERE placementid = "
UINT64_FORMAT,
placement->placementId);
SendCommandToWorkersWithMetadata(deletePlacementCommand->data);
SendCommandToWorkersWithMetadata(deletePlacementCommand->data);
}
}
}
}


/*
 * ReplicatedMetadataSyncedDistributedTableList is a helper function which returns the
 * list of replicated hash distributed tables.
 */
static List *
ReplicatedMetadataSyncedDistributedTableList(void)
{
List *distributedRelationList = CitusTableTypeIdList(DISTRIBUTED_TABLE);
List *replicatedHashDistributedTableList = NIL;

Oid relationId = InvalidOid;
foreach_oid(relationId, distributedRelationList)
{
if (ShouldSyncTableMetadata(relationId) && !SingleReplicatedTable(relationId))
{
replicatedHashDistributedTableList =
lappend_oid(replicatedHashDistributedTableList, relationId);
}
}

return replicatedHashDistributedTableList;
}


/* CompareOids is a comparison function for sort shard oids */
int
CompareOids(const void *leftElement, const void *rightElement)

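A sketch of the catalog state this function operates on (group id 2 and the placement id are illustrative, not from the diff); when forceRemoteDelete is set, the same DELETE shown in the C string above is sent to every metadata worker:

    -- placements that would be removed for node group 2
    SELECT placementid, shardid
    FROM pg_catalog.pg_dist_placement
    WHERE groupid = 2;

    -- what SendCommandToWorkersWithMetadata() issues per placement
    DELETE FROM pg_catalog.pg_dist_placement WHERE placementid = 100001;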
@@ -470,33 +470,18 @@ SingleReplicatedTable(Oid relationId)
return false;
}

/* for hash distributed tables, it is sufficient to only check one shard */
if (IsCitusTableType(relationId, HASH_DISTRIBUTED))
List *shardIntervalList = LoadShardList(relationId);
uint64 *shardIdPointer = NULL;
foreach_ptr(shardIdPointer, shardIntervalList)
{
/* checking only for the first shard id should suffice */
uint64 shardId = *(uint64 *) linitial(shardList);

uint64 shardId = *shardIdPointer;
shardPlacementList = ShardPlacementListWithoutOrphanedPlacements(shardId);

if (list_length(shardPlacementList) != 1)
{
return false;
}
}
else
{
List *shardIntervalList = LoadShardList(relationId);
uint64 *shardIdPointer = NULL;
foreach_ptr(shardIdPointer, shardIntervalList)
{
uint64 shardId = *shardIdPointer;
shardPlacementList = ShardPlacementListWithoutOrphanedPlacements(shardId);

if (list_length(shardPlacementList) != 1)
{
return false;
}
}
}

return true;
}

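A rough SQL analogue of the per-shard check (only a sketch: the C code goes through ShardPlacementListWithoutOrphanedPlacements, and the table name below is hypothetical):

    -- a table is single-replicated if every shard has exactly one placement
    SELECT shardid, count(*) AS placement_count
    FROM pg_dist_shard
    JOIN pg_dist_placement USING (shardid)
    WHERE logicalrelid = 'my_distributed_table'::regclass
    GROUP BY shardid
    HAVING count(*) <> 1;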
@@ -74,11 +74,10 @@ extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum);
"VALUES (" UINT64_FORMAT ", %d, " UINT64_FORMAT \
", %d, " UINT64_FORMAT \
") " \
"ON CONFLICT (placementid) DO UPDATE SET " \
"shardid = EXCLUDED.shardid, " \
"ON CONFLICT (shardid, groupid) DO UPDATE SET " \
"shardstate = EXCLUDED.shardstate, " \
"shardlength = EXCLUDED.shardlength, " \
"groupid = EXCLUDED.groupid"
"placementid = EXCLUDED.placementid"
#define METADATA_SYNC_CHANNEL "metadata_sync"

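This switches the upsert's conflict target from placementid to (shardid, groupid), so re-syncing a placement that was re-created under a new placement id updates the existing row instead of conflicting with it. A filled-in sketch of the generated statement, with illustrative values, a column order inferred from the format specifiers (the column list itself is not visible in this hunk), and assuming a unique index on (shardid, groupid):

    INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid)
    VALUES (102008, 1, 0, 2, 100001)
    ON CONFLICT (shardid, groupid) DO UPDATE SET
        shardstate = EXCLUDED.shardstate,
        shardlength = EXCLUDED.shardlength,
        placementid = EXCLUDED.placementid;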
@@ -21,7 +21,8 @@
extern void EnsureReferenceTablesExistOnAllNodes(void);
extern void EnsureReferenceTablesExistOnAllNodesExtended(char transferMode);
extern uint32 CreateReferenceTableColocationId(void);
extern void DeleteAllReferenceTablePlacementsFromNodeGroup(int32 groupId);
extern void DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool
forceRemoteDelete);
extern int CompareOids(const void *leftElement, const void *rightElement);
extern int ReferenceTableReplicationFactor(void);
extern void ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort);

@@ -1,5 +1,5 @@
test: multi_cluster_management
test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers
test: multi_cluster_management
test: multi_test_catalog_views

test: columnar_create

@@ -1,7 +1,7 @@
--
-- failure_add_disable_node tests master_add_node, master_remove_node
-- master_activate_node for failures.
-- master_disable_node and master_add_inactive_node can not be
-- citus_disable_node_and_wait and master_add_inactive_node can not be
-- tested as they don't create network activity
--
SELECT citus.mitmproxy('conn.allow()');
@@ -53,9 +53,15 @@ ORDER BY placementid;
200000 | 1
(2 rows)

SELECT master_disable_node('localhost', :worker_2_proxy_port);
SELECT citus_disable_node('localhost', :worker_2_proxy_port, true);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 9060) to activate this node back.
master_disable_node
citus_disable_node
---------------------------------------------------------------------

(1 row)

SELECT public.wait_until_metadata_sync();
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)

@@ -10,6 +10,10 @@ SELECT pg_reload_conf();
t
(1 row)

CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';
-- Add some helper functions for sending commands to mitmproxy
CREATE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$
DECLARE

@ -66,6 +66,8 @@ NOTICE: cleaned up 2 orphaned shards
|
|||
SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_rebalance.%' AND refd_relid LIKE 'fkey_to_reference_shard_rebalance.%' ORDER BY 1,2,3;
|
||||
name | relid | refd_relid
|
||||
---------------------------------------------------------------------
|
||||
referencing_table2_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referencing_table
|
||||
referencing_table2_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referencing_table
|
||||
referencing_table2_id_fkey_15000009 | fkey_to_reference_shard_rebalance.referencing_table2_15000009 | fkey_to_reference_shard_rebalance.referencing_table_15000001
|
||||
referencing_table2_id_fkey_15000010 | fkey_to_reference_shard_rebalance.referencing_table2_15000010 | fkey_to_reference_shard_rebalance.referencing_table_15000002
|
||||
referencing_table2_id_fkey_15000011 | fkey_to_reference_shard_rebalance.referencing_table2_15000011 | fkey_to_reference_shard_rebalance.referencing_table_15000003
|
||||
|
@ -74,6 +76,8 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
|
|||
referencing_table2_id_fkey_15000014 | fkey_to_reference_shard_rebalance.referencing_table2_15000014 | fkey_to_reference_shard_rebalance.referencing_table_15000006
|
||||
referencing_table2_id_fkey_15000015 | fkey_to_reference_shard_rebalance.referencing_table2_15000015 | fkey_to_reference_shard_rebalance.referencing_table_15000007
|
||||
referencing_table2_id_fkey_15000016 | fkey_to_reference_shard_rebalance.referencing_table2_15000016 | fkey_to_reference_shard_rebalance.referencing_table_15000008
|
||||
referencing_table2_ref_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referenced_table
|
||||
referencing_table2_ref_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referenced_table
|
||||
referencing_table2_ref_id_fkey_15000009 | fkey_to_reference_shard_rebalance.referencing_table2_15000009 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table2_ref_id_fkey_15000010 | fkey_to_reference_shard_rebalance.referencing_table2_15000010 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table2_ref_id_fkey_15000011 | fkey_to_reference_shard_rebalance.referencing_table2_15000011 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
|
@ -82,6 +86,8 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
|
|||
referencing_table2_ref_id_fkey_15000014 | fkey_to_reference_shard_rebalance.referencing_table2_15000014 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table2_ref_id_fkey_15000015 | fkey_to_reference_shard_rebalance.referencing_table2_15000015 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table2_ref_id_fkey_15000016 | fkey_to_reference_shard_rebalance.referencing_table2_15000016 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey | fkey_to_reference_shard_rebalance.referencing_table | fkey_to_reference_shard_rebalance.referenced_table
|
||||
referencing_table_id_fkey | fkey_to_reference_shard_rebalance.referencing_table | fkey_to_reference_shard_rebalance.referenced_table
|
||||
referencing_table_id_fkey_15000001 | fkey_to_reference_shard_rebalance.referencing_table_15000001 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey_15000002 | fkey_to_reference_shard_rebalance.referencing_table_15000002 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey_15000003 | fkey_to_reference_shard_rebalance.referencing_table_15000003 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
|
@ -90,7 +96,7 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
|
|||
referencing_table_id_fkey_15000006 | fkey_to_reference_shard_rebalance.referencing_table_15000006 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey_15000007 | fkey_to_reference_shard_rebalance.referencing_table_15000007 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey_15000008 | fkey_to_reference_shard_rebalance.referencing_table_15000008 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
(24 rows)
|
||||
(30 rows)
|
||||
|
||||
SELECT master_move_shard_placement(15000009, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes');
|
||||
master_move_shard_placement
|
||||
|
@ -109,6 +115,8 @@ NOTICE: cleaned up 2 orphaned shards
|
|||
SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_rebalance.%' AND refd_relid LIKE 'fkey_to_reference_shard_rebalance.%' ORDER BY 1,2,3;
|
||||
name | relid | refd_relid
|
||||
---------------------------------------------------------------------
|
||||
referencing_table2_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referencing_table
|
||||
referencing_table2_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referencing_table
|
||||
referencing_table2_id_fkey_15000009 | fkey_to_reference_shard_rebalance.referencing_table2_15000009 | fkey_to_reference_shard_rebalance.referencing_table_15000001
|
||||
referencing_table2_id_fkey_15000010 | fkey_to_reference_shard_rebalance.referencing_table2_15000010 | fkey_to_reference_shard_rebalance.referencing_table_15000002
|
||||
referencing_table2_id_fkey_15000011 | fkey_to_reference_shard_rebalance.referencing_table2_15000011 | fkey_to_reference_shard_rebalance.referencing_table_15000003
|
||||
|
@ -117,6 +125,8 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
|
|||
referencing_table2_id_fkey_15000014 | fkey_to_reference_shard_rebalance.referencing_table2_15000014 | fkey_to_reference_shard_rebalance.referencing_table_15000006
|
||||
referencing_table2_id_fkey_15000015 | fkey_to_reference_shard_rebalance.referencing_table2_15000015 | fkey_to_reference_shard_rebalance.referencing_table_15000007
|
||||
referencing_table2_id_fkey_15000016 | fkey_to_reference_shard_rebalance.referencing_table2_15000016 | fkey_to_reference_shard_rebalance.referencing_table_15000008
|
||||
referencing_table2_ref_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referenced_table
|
||||
referencing_table2_ref_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referenced_table
|
||||
referencing_table2_ref_id_fkey_15000009 | fkey_to_reference_shard_rebalance.referencing_table2_15000009 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table2_ref_id_fkey_15000010 | fkey_to_reference_shard_rebalance.referencing_table2_15000010 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table2_ref_id_fkey_15000011 | fkey_to_reference_shard_rebalance.referencing_table2_15000011 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
|
@ -125,6 +135,8 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
|
|||
referencing_table2_ref_id_fkey_15000014 | fkey_to_reference_shard_rebalance.referencing_table2_15000014 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table2_ref_id_fkey_15000015 | fkey_to_reference_shard_rebalance.referencing_table2_15000015 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table2_ref_id_fkey_15000016 | fkey_to_reference_shard_rebalance.referencing_table2_15000016 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey | fkey_to_reference_shard_rebalance.referencing_table | fkey_to_reference_shard_rebalance.referenced_table
|
||||
referencing_table_id_fkey | fkey_to_reference_shard_rebalance.referencing_table | fkey_to_reference_shard_rebalance.referenced_table
|
||||
referencing_table_id_fkey_15000001 | fkey_to_reference_shard_rebalance.referencing_table_15000001 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey_15000002 | fkey_to_reference_shard_rebalance.referencing_table_15000002 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey_15000003 | fkey_to_reference_shard_rebalance.referencing_table_15000003 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
|
@ -133,7 +145,7 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
|
|||
referencing_table_id_fkey_15000006 | fkey_to_reference_shard_rebalance.referencing_table_15000006 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey_15000007 | fkey_to_reference_shard_rebalance.referencing_table_15000007 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
referencing_table_id_fkey_15000008 | fkey_to_reference_shard_rebalance.referencing_table_15000008 | fkey_to_reference_shard_rebalance.referenced_table_15000000
|
||||
(24 rows)
|
||||
(30 rows)
|
||||
|
||||
-- create a function to show the
|
||||
CREATE FUNCTION get_foreign_key_to_reference_table_commands(Oid)
|
||||
|
|
|
@@ -129,6 +129,7 @@ SELECT * FROM create_distributed_table('rep1', 'id');
-- Add the coordinator, so we can have a replicated shard
SELECT 1 FROM citus_add_node('localhost', :master_port, 0);
NOTICE: Replicating reference table "ref" to the node localhost:xxxxx
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
@@ -249,12 +250,18 @@ SET LOCAL citus.log_local_commands TO ON;
INSERT INTO rep1 VALUES (1);
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT lock_shard_resources(3, ARRAY[92448300])
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: executing the command locally: INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
ROLLBACK;
NOTICE: issuing ROLLBACK
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ROLLBACK
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- Cause the orphaned shard to be local
SELECT 1 FROM citus_drain_node('localhost', :master_port);
NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
@@ -278,12 +285,14 @@ SET LOCAL citus.log_local_commands TO ON;
INSERT INTO rep1 VALUES (1);
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
NOTICE: issuing SELECT lock_shard_resources(3, ARRAY[92448300])
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
ROLLBACK;
NOTICE: issuing ROLLBACK
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx

|
@ -1,16 +1,11 @@
|
|||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1-begin s1-add-node-1 s2-remove-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -18,10 +13,10 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-remove-node-1:
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-remove-node-1: <... completed>
|
||||
master_remove_node
|
||||
|
@ -30,7 +25,7 @@ master_remove_node
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename|nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -42,16 +37,11 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -59,10 +49,10 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-add-node-2:
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-add-node-2: <... completed>
|
||||
?column?
|
||||
|
@ -71,7 +61,7 @@ step s2-add-node-2: <... completed>
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -87,16 +77,11 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -104,10 +89,10 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-add-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -116,7 +101,7 @@ step s2-add-node-1: <... completed>
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -130,16 +115,11 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-abort s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -147,10 +127,10 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-add-node-2:
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
<waiting ...>
|
||||
step s1-abort:
|
||||
ABORT;
|
||||
ABORT;
|
||||
|
||||
step s2-add-node-2: <... completed>
|
||||
?column?
|
||||
|
@ -159,7 +139,7 @@ step s2-add-node-2: <... completed>
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -173,16 +153,11 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-abort s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -190,10 +165,10 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-abort:
|
||||
ABORT;
|
||||
ABORT;
|
||||
|
||||
step s2-add-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -202,7 +177,7 @@ step s2-add-node-1: <... completed>
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -216,13 +191,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-node-1 s1-add-node-2 s1-begin s1-remove-node-1 s2-remove-node-2 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -230,7 +200,7 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s1-add-node-2:
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -238,10 +208,10 @@ step s1-add-node-2:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-remove-node-1:
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
@ -249,10 +219,10 @@ master_remove_node
|
|||
(1 row)
|
||||
|
||||
step s2-remove-node-2:
|
||||
SELECT * FROM master_remove_node('localhost', 57638);
|
||||
SELECT * FROM master_remove_node('localhost', 57638);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-remove-node-2: <... completed>
|
||||
master_remove_node
|
||||
|
@ -261,7 +231,7 @@ master_remove_node
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename|nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -273,13 +243,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-remove-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -287,10 +252,10 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-remove-node-1:
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
@ -298,15 +263,15 @@ master_remove_node
|
|||
(1 row)
|
||||
|
||||
step s2-remove-node-1:
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
SELECT * FROM master_remove_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-remove-node-1: <... completed>
|
||||
ERROR: node at "localhost:xxxxx" does not exist
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename|nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -318,13 +283,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -332,10 +292,10 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-activate-node-1:
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -343,10 +303,10 @@ step s1-activate-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-activate-node-1:
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-activate-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -355,7 +315,7 @@ step s2-activate-node-1: <... completed>
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -369,13 +329,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -383,21 +338,28 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s2-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-disable-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -405,8 +367,13 @@ step s2-disable-node-1: <... completed>
|
|||
1
|
||||
(1 row)
|
||||
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -420,13 +387,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-inactive-1:
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -434,10 +396,10 @@ step s1-add-inactive-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-activate-node-1:
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -445,10 +407,10 @@ step s1-activate-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-activate-node-1:
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-activate-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -457,7 +419,7 @@ step s2-activate-node-1: <... completed>
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -471,13 +433,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-inactive-1:
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -485,21 +442,28 @@ step s1-add-inactive-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s2-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-disable-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -507,8 +471,13 @@ step s2-disable-node-1: <... completed>
|
|||
1
|
||||
(1 row)
|
||||
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -522,13 +491,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -536,21 +500,27 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s2-activate-node-1:
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-activate-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -559,7 +529,7 @@ step s2-activate-node-1: <... completed>
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -573,13 +543,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -587,10 +552,10 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-activate-node-1:
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -598,10 +563,11 @@ step s1-activate-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-disable-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -609,8 +575,13 @@ step s2-disable-node-1: <... completed>
|
|||
1
|
||||
(1 row)
|
||||
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -624,13 +595,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-inactive-1:
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -638,21 +604,27 @@ step s1-add-inactive-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s2-activate-node-1:
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-activate-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -661,7 +633,7 @@ step s2-activate-node-1: <... completed>
|
|||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
@ -675,13 +647,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-inactive-1:
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -689,10 +656,10 @@ step s1-add-inactive-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-activate-node-1:
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
SELECT 1 FROM master_activate_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -700,10 +667,11 @@ step s1-activate-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-disable-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -711,59 +679,13 @@ step s2-disable-node-1: <... completed>
|
|||
1
|
||||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
localhost| 57637|f
|
||||
(1 row)
|
||||
|
||||
master_remove_node
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-abort s1-show-nodes
?column?
---------------------------------------------------------------------
1
(1 row)

step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);

?column?
---------------------------------------------------------------------
1
(1 row)

step s1-begin:
BEGIN;

step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);

?column?
---------------------------------------------------------------------
1
(1 row)

step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
<waiting ...>
step s1-abort:
ABORT;

step s2-disable-node-1: <... completed>
?column?
---------------------------------------------------------------------
1
(1 row)

step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename |nodeport|isactive
---------------------------------------------------------------------
@ -777,13 +699,8 @@ master_remove_node
|
|||
|
||||
|
||||
starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-abort s1-show-nodes
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -791,21 +708,28 @@ step s1-add-node-1:
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s2-disable-node-1:
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT 1 FROM master_disable_node('localhost', 57637);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
<waiting ...>
|
||||
step s1-abort:
|
||||
ABORT;
|
||||
ABORT;
|
||||
|
||||
step s2-disable-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -813,8 +737,13 @@ step s2-disable-node-1: <... completed>
|
|||
1
|
||||
(1 row)
|
||||
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
|
||||
|
||||
nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
@ -3,9 +3,21 @@ Parsed test spec with 4 sessions
|
|||
starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
SELECT assign_distributed_transaction_id(0, 9, '2021-07-09 15:41:55.542377+02');
|
||||
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-update:
|
||||
UPDATE distributed_table SET y = 1 WHERE x = 1;
|
||||
|
@ -28,13 +40,13 @@ step detector-dump-wait-edges:

waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
406| 405|f
9| 8|f
(1 row)

transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
405|
406| 405
8|
9| 8
(2 rows)
step s1-abort:
|
||||
|
@ -53,12 +65,30 @@ restore_isolation_tester_func
|
|||
starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
SELECT assign_distributed_transaction_id(0, 9, '2021-07-09 15:41:55.542377+02');
|
||||
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s3-begin:
|
||||
BEGIN;
|
||||
SELECT assign_distributed_transaction_id(0, 10, '2021-07-09 15:41:55.542377+02');
|
||||
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-update:
|
||||
UPDATE distributed_table SET y = 1 WHERE x = 1;
|
||||
|
@ -84,16 +114,16 @@ step detector-dump-wait-edges:

waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
410| 409|f
411| 409|f
411| 410|t
9| 8|f
10| 8|f
10| 9|t
(3 rows)

transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
409|
410|409
411|409,410
8|
9|8
10|8,9
(3 rows)
step s1-abort:
|
||||
|
|
|
@ -1,104 +0,0 @@
|
|||
Parsed test spec with 4 sessions
|
||||
|
||||
starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-update:
|
||||
UPDATE distributed_table SET y = 1 WHERE x = 1;
|
||||
|
||||
step s2-update:
|
||||
UPDATE distributed_table SET y = 2 WHERE x = 1;
|
||||
<waiting ...>
|
||||
step detector-dump-wait-edges:
|
||||
SELECT
|
||||
waiting_transaction_num,
|
||||
blocking_transaction_num,
|
||||
blocking_transaction_waiting
|
||||
FROM
|
||||
dump_global_wait_edges()
|
||||
ORDER BY
|
||||
waiting_transaction_num,
|
||||
blocking_transaction_num,
|
||||
blocking_transaction_waiting;
|
||||
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;
|
||||
|
||||
waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
|
||||
---------------------------------------------------------------------
|
||||
406| 405|f
|
||||
(1 row)
|
||||
|
||||
transactionnumber|waitingtransactionnumbers
|
||||
---------------------------------------------------------------------
|
||||
405|
|
||||
406| 405
|
||||
(2 rows)
|
||||
|
||||
step s1-abort:
|
||||
ABORT;
|
||||
|
||||
step s2-update: <... completed>
|
||||
step s2-abort:
|
||||
ABORT;
|
||||
|
||||
|
||||
starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s3-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-update:
|
||||
UPDATE distributed_table SET y = 1 WHERE x = 1;
|
||||
|
||||
step s2-update:
|
||||
UPDATE distributed_table SET y = 2 WHERE x = 1;
|
||||
<waiting ...>
|
||||
step s3-update:
|
||||
UPDATE distributed_table SET y = 3 WHERE x = 1;
|
||||
<waiting ...>
|
||||
step detector-dump-wait-edges:
|
||||
SELECT
|
||||
waiting_transaction_num,
|
||||
blocking_transaction_num,
|
||||
blocking_transaction_waiting
|
||||
FROM
|
||||
dump_global_wait_edges()
|
||||
ORDER BY
|
||||
waiting_transaction_num,
|
||||
blocking_transaction_num,
|
||||
blocking_transaction_waiting;
|
||||
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;
|
||||
|
||||
waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
|
||||
---------------------------------------------------------------------
|
||||
410| 409|f
|
||||
411| 409|f
|
||||
411| 410|t
|
||||
(3 rows)
|
||||
|
||||
transactionnumber|waitingtransactionnumbers
|
||||
---------------------------------------------------------------------
|
||||
409|
|
||||
410|409
|
||||
411|409,410
|
||||
(3 rows)
|
||||
|
||||
step s1-abort:
|
||||
ABORT;
|
||||
|
||||
step s2-update: <... completed>
|
||||
step s2-abort:
|
||||
ABORT;
|
||||
|
||||
step s3-update: <... completed>
|
||||
step s3-abort:
|
||||
ABORT;
|
||||
|
File diff suppressed because it is too large
Load Diff
|
@ -3,12 +3,12 @@ Parsed test spec with 2 sessions
|
|||
starting permutation: s1-begin s1-update-node-1 s2-update-node-2 s1-commit s1-show-nodes
|
||||
nodeid|nodename |nodeport
|
||||
---------------------------------------------------------------------
|
||||
22|localhost| 57637
|
||||
23|localhost| 57638
|
||||
22|localhost| 57638
|
||||
21|localhost| 57637
|
||||
(2 rows)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-update-node-1:
|
||||
SELECT 1 FROM master_update_node(
|
||||
|
@ -28,7 +28,7 @@ step s2-update-node-2:
|
|||
58638);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-update-node-2: <... completed>
|
||||
?column?
|
||||
|
@ -43,8 +43,8 @@ step s1-show-nodes:
|
|||
|
||||
nodeid|nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
22|localhost| 58637|t
|
||||
23|localhost| 58638|t
|
||||
21|localhost| 58637|t
|
||||
22|localhost| 58638|t
|
||||
(2 rows)
|
||||
|
||||
nodeid|nodename|nodeport
|
||||
|
@ -55,12 +55,12 @@ nodeid|nodename|nodeport
|
|||
starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes
|
||||
nodeid|nodename |nodeport
|
||||
---------------------------------------------------------------------
|
||||
24|localhost| 57637
|
||||
25|localhost| 57638
|
||||
24|localhost| 57638
|
||||
23|localhost| 57637
|
||||
(2 rows)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-update-node-1:
|
||||
SELECT 1 FROM master_update_node(
|
||||
|
@ -74,7 +74,7 @@ step s1-update-node-1:
|
|||
(1 row)
|
||||
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s2-update-node-1:
|
||||
SELECT 1 FROM master_update_node(
|
||||
|
@ -83,7 +83,7 @@ step s2-update-node-1:
|
|||
58637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-update-node-1: <... completed>
|
||||
?column?
|
||||
|
@ -92,7 +92,7 @@ step s2-update-node-1: <... completed>
|
|||
(1 row)
|
||||
|
||||
step s2-abort:
|
||||
ABORT;
|
||||
ABORT;
|
||||
|
||||
step s1-show-nodes:
|
||||
SELECT nodeid, nodename, nodeport, isactive
|
||||
|
@ -101,8 +101,8 @@ step s1-show-nodes:
|
|||
|
||||
nodeid|nodename |nodeport|isactive
|
||||
---------------------------------------------------------------------
|
||||
25|localhost| 57638|t
|
||||
24|localhost| 58637|t
|
||||
24|localhost| 57638|t
|
||||
23|localhost| 58637|t
|
||||
(2 rows)
|
||||
|
||||
nodeid|nodename|nodeport
|
||||
|
@ -113,12 +113,12 @@ nodeid|nodename|nodeport
|
|||
starting permutation: s1-begin s1-update-node-1 s2-start-metadata-sync-node-2 s1-commit s2-verify-metadata
|
||||
nodeid|nodename |nodeport
|
||||
---------------------------------------------------------------------
|
||||
26|localhost| 57637
|
||||
27|localhost| 57638
|
||||
26|localhost| 57638
|
||||
25|localhost| 57637
|
||||
(2 rows)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-update-node-1:
|
||||
SELECT 1 FROM master_update_node(
|
||||
|
@ -135,7 +135,7 @@ step s2-start-metadata-sync-node-2:
|
|||
SELECT start_metadata_sync_to_node('localhost', 57638);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
COMMIT;
|
||||
|
||||
step s2-start-metadata-sync-node-2: <... completed>
|
||||
start_metadata_sync_to_node
|
||||
|
@ -152,13 +152,13 @@ step s2-verify-metadata:
|
|||
|
||||
nodeid|groupid|nodename |nodeport
|
||||
---------------------------------------------------------------------
|
||||
26| 26|localhost| 58637
|
||||
27| 27|localhost| 57638
|
||||
25| 25|localhost| 58637
|
||||
26| 26|localhost| 57638
|
||||
(2 rows)
|
||||
|
||||
master_run_on_worker
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57638,t,"[{""f1"": 26, ""f2"": 26, ""f3"": ""localhost"", ""f4"": 58637}, {""f1"": 27, ""f2"": 27, ""f3"": ""localhost"", ""f4"": 57638}]")
|
||||
(localhost,57638,t,"[{""f1"": 25, ""f2"": 25, ""f3"": ""localhost"", ""f4"": 58637}, {""f1"": 26, ""f2"": 26, ""f3"": ""localhost"", ""f4"": 57638}]")
|
||||
(1 row)
|
||||
|
||||
nodeid|nodename|nodeport
|
||||
|
@ -169,8 +169,8 @@ nodeid|nodename|nodeport
|
|||
starting permutation: s2-create-table s1-begin s1-update-node-nonexistent s1-prepare-transaction s2-cache-prepared-statement s1-commit-prepared s2-execute-prepared s1-update-node-existent s2-drop-table
|
||||
nodeid|nodename |nodeport
|
||||
---------------------------------------------------------------------
|
||||
28|localhost| 57637
|
||||
29|localhost| 57638
|
||||
28|localhost| 57638
|
||||
27|localhost| 57637
|
||||
(2 rows)
|
||||
|
||||
step s2-create-table:
|
||||
|
@ -183,7 +183,7 @@ create_distributed_table
|
|||
(1 row)
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
BEGIN;
|
||||
|
||||
step s1-update-node-nonexistent:
|
||||
SELECT 1 FROM master_update_node(
|
||||
|
|
|
@ -57,7 +57,7 @@ SELECT master_get_active_worker_nodes();
(localhost,57637)
(1 row)

-- try to disable a node with no placements see that node is removed
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
@ -66,8 +66,14 @@ DETAIL: distributed objects are only kept in sync when citus.enable_object_prop
1
(1 row)

SELECT master_disable_node('localhost', :worker_2_port);
master_disable_node
SELECT citus_disable_node('localhost', :worker_2_port);
citus_disable_node
---------------------------------------------------------------------

(1 row)

SELECT public.wait_until_metadata_sync();
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)
@ -144,7 +150,7 @@ SELECT master_get_active_worker_nodes();
(localhost,57637)
(2 rows)

-- insert a row so that master_disable_node() exercises closing connections
-- insert a row so that citus_disable_node() exercises closing connections
CREATE TABLE test_reference_table (y int primary key, name text);
SELECT create_reference_table('test_reference_table');
create_reference_table
@ -158,29 +164,27 @@ SELECT citus_remove_node('localhost', :worker_2_port);
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
-- try to disable a node with active placements see that node is removed
-- observe that a notification is displayed
SELECT master_disable_node('localhost', :worker_2_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
master_disable_node
---------------------------------------------------------------------

(1 row)

-- try to disable a node with active placements
-- which should fail because there are some placements
-- which are the only placements for a given shard
SELECT citus_disable_node('localhost', :worker_2_port);
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
SELECT master_get_active_worker_nodes();
master_get_active_worker_nodes
---------------------------------------------------------------------
(localhost,57638)
(localhost,57637)
(1 row)
(2 rows)

-- try to disable a node which does not exist and see that an error is thrown
SELECT master_disable_node('localhost.noexist', 2345);
SELECT citus_disable_node('localhost.noexist', 2345);
ERROR: node at "localhost.noexist:2345" does not exist
-- drop the table without leaving a shard placement behind (messes up other tests)
SELECT master_activate_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
NOTICE: Replicating reference table "test_reference_table" to the node localhost:xxxxx
master_activate_node
---------------------------------------------------------------------
3
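For reference, the NOTICE above already names the recovery path; as a minimal sketch (the port is the test default used throughout this diff, not a requirement), re-enabling a disabled worker and waiting for the metadata to settle looks like:

SELECT citus_activate_node('localhost', 57638);
SELECT public.wait_until_metadata_sync();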
@ -205,7 +209,8 @@ GRANT EXECUTE ON FUNCTION master_activate_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_secondary_node(text,int,text,int,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_disable_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION citus_disable_node(text,int,bool) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION citus_disable_node_and_wait(text,int,bool) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_remove_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_metadata_user;
-- user needs permission for the pg_dist_node and pg_dist_local_group for metadata syncing
@ -237,8 +242,8 @@ SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_add_inactive_node
SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_activate_node
SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_disable_node
SELECT 1 FROM citus_disable_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function citus_disable_node
SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_remove_node
SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
@ -265,12 +270,6 @@ DETAIL: distributed objects are only kept in sync when citus.enable_object_prop
|
|||
1
|
||||
(1 row)
|
||||
|
||||
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
|
@ -409,6 +408,22 @@ WARNING: could not find any shard placements for shardId 1220025
|
|||
WARNING: could not find any shard placements for shardId 1220027
|
||||
WARNING: could not find any shard placements for shardId 1220029
|
||||
WARNING: could not find any shard placements for shardId 1220031
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
WARNING: could not find any shard placements for shardId 1220017
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -833,8 +848,14 @@ SELECT master_activate_node('localhost', 9999);
|
|||
22
|
||||
(1 row)
|
||||
|
||||
SELECT master_disable_node('localhost', 9999);
|
||||
master_disable_node
|
||||
SELECT citus_disable_node('localhost', 9999);
|
||||
citus_disable_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
|
|
@ -968,10 +968,12 @@ ALTER EXTENSION citus UPDATE TO '11.0-1';
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
function citus_disable_node(text,integer) void |
function master_append_table_to_shard(bigint,text,text,integer) real |
function master_apply_delete_command(text) integer |
function master_get_table_metadata(text) record |
(3 rows)
| function citus_disable_node(text,integer,boolean) void
(5 rows)

DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
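The signature change above (citus_disable_node gains a boolean force argument) can also be spot-checked from the catalog; this query is illustrative only and not part of the regression output:

SELECT proname, pg_get_function_identity_arguments(oid)
FROM pg_proc
WHERE proname = 'citus_disable_node';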
@ -1418,6 +1418,9 @@ CREATE TABLE tmp_placement AS
|
|||
DELETE FROM pg_dist_placement
|
||||
WHERE groupid = :old_worker_2_group;
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
WARNING: could not find any shard placements for shardId 1310001
|
||||
WARNING: could not find any shard placements for shardId 1310021
|
||||
WARNING: could not find any shard placements for shardId 1310026
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -1585,14 +1588,17 @@ HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT master_add_node('localhost', :master_port, groupid => 0);
ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT master_disable_node('localhost', :worker_1_port);
ERROR: Disabling localhost:xxxxx failed
DETAIL: localhost:xxxxx is a metadata node, but is out of sync
HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them.
SELECT master_disable_node('localhost', :worker_2_port);
ERROR: Disabling localhost:xxxxx failed
DETAIL: localhost:xxxxx is a metadata node, but is out of sync
HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them.
SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
ERROR: disabling the first worker node in the metadata is not allowed
HINT: You can force disabling node, but this operation might cause replicated shards to diverge: SELECT citus_disable_node('localhost', 57637, force:=true);
CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)"
PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM
SELECT citus_disable_node_and_wait('localhost', :worker_2_port);
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)"
PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM
SELECT master_remove_node('localhost', :worker_1_port);
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table
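The HINT above gives the escape hatch verbatim; a sketch of the full forced-disable sequence as the surrounding tests use it (ports are the test defaults, not part of this hunk):

SELECT citus_disable_node('localhost', 57637, force := true);
SELECT public.wait_until_metadata_sync();
-- later, once the failure is resolved
SELECT citus_activate_node('localhost', 57637);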
@ -300,7 +300,7 @@ alter table local_fkey_table ALTER COLUMN id TYPE int;
|
|||
SET citus.force_max_query_parallelization TO ON;
|
||||
alter table distributed_table ALTER COLUMN value_1 TYPE bigint;
|
||||
alter table distributed_table ALTER COLUMN value_1 TYPE int;
|
||||
SET client_min_messages TO error;
|
||||
SET client_min_messages TO ERROR;
|
||||
DROP TABLE distributed_table, referece_table, local_fkey_table;
|
||||
SELECT master_remove_node('localhost', :master_port);
|
||||
master_remove_node
|
||||
|
|
|
@ -46,7 +46,7 @@ SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset
|
|||
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;
|
||||
nodeid | nodename | nodeport | hasmetadata | metadatasynced
|
||||
---------------------------------------------------------------------
|
||||
2 | localhost | 57637 | f | f
|
||||
2 | localhost | 57637 | t | t
|
||||
(1 row)
|
||||
|
||||
-- create couple of tables
|
||||
|
@ -83,7 +83,7 @@ SELECT 1 FROM master_update_node((SELECT nodeid FROM pg_dist_node),
|
|||
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;
|
||||
nodeid | nodename | nodeport | hasmetadata | metadatasynced
|
||||
---------------------------------------------------------------------
|
||||
2 | localhost | 57638 | f | f
|
||||
2 | localhost | 57638 | t | f
|
||||
(1 row)
|
||||
|
||||
-- start syncing metadata to the node
|
||||
|
@ -641,10 +641,11 @@ SELECT verify_metadata('localhost', :worker_1_port),
---------------------------------------------------------------------
-- Don't drop the reference table so it has shards on the nodes being disabled
DROP TABLE dist_table_1, dist_table_2;
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
?column?
SELECT pg_catalog.citus_disable_node_and_wait('localhost', :worker_2_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
citus_disable_node_and_wait
---------------------------------------------------------------------
1

(1 row)

SELECT verify_metadata('localhost', :worker_1_port);
@ -666,7 +667,7 @@ SELECT verify_metadata('localhost', :worker_1_port);
(1 row)

---------------------------------------------------------------------
-- Test master_disable_node() when the node that is being disabled is actually down
-- Test citus_disable_node_and_wait() when the node that is being disabled is actually down
---------------------------------------------------------------------
SELECT master_update_node(:nodeid_2, 'localhost', 1);
master_update_node
@ -682,22 +683,9 @@ SELECT wait_until_metadata_sync(30000);

-- set metadatasynced so we try propagating metadata changes
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2);
-- should not error out, master_disable_node is tolerant for node failures
SELECT 1 FROM master_disable_node('localhost', 1);
?column?
---------------------------------------------------------------------
1
(1 row)

-- try again after stopping metadata sync
SELECT stop_metadata_sync_to_node('localhost', 1);
NOTICE: dropping metadata on the node (localhost,1)
stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

SELECT 1 FROM master_disable_node('localhost', 1);
-- should not error out, citus_disable_node is tolerant for node failures
-- but we should not wait metadata syncing to finish as this node is down
SELECT 1 FROM citus_disable_node('localhost', 1, true);
?column?
---------------------------------------------------------------------
1
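For context, the pattern being exercised here, sketched end to end with the test's own node id and port placeholders (not a recommendation for production): point the node at an unreachable address, disable it without waiting on its metadata, then stop syncing metadata to it.

SELECT master_update_node(:nodeid_2, 'localhost', 1);
SELECT 1 FROM citus_disable_node('localhost', 1, true);
SELECT stop_metadata_sync_to_node('localhost', 1);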
@ -734,7 +722,7 @@ SELECT verify_metadata('localhost', :worker_1_port);
|
|||
(1 row)
|
||||
|
||||
---------------------------------------------------------------------
|
||||
-- Test master_disable_node() when the other node is down
|
||||
-- Test citus_disable_node_and_wait() when the other node is down
|
||||
---------------------------------------------------------------------
|
||||
-- node 1 is down.
|
||||
SELECT master_update_node(:nodeid_1, 'localhost', 1);
|
||||
|
@ -751,9 +739,14 @@ SELECT wait_until_metadata_sync(30000);
|
|||
|
||||
-- set metadatasynced so we try propagating metadata changes
|
||||
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2);
|
||||
-- should error out
|
||||
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
|
||||
ERROR: Disabling localhost:xxxxx failed
|
||||
-- should not error out, citus_disable_node is tolerant for node failures
|
||||
-- but we should not wait metadata syncing to finish as this node is down
|
||||
SELECT 1 FROM citus_disable_node('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- try again after stopping metadata sync
|
||||
SELECT stop_metadata_sync_to_node('localhost', 1);
|
||||
NOTICE: dropping metadata on the node (localhost,1)
|
||||
|
@ -762,7 +755,7 @@ NOTICE: dropping metadata on the node (localhost,1)
|
|||
|
||||
(1 row)
|
||||
|
||||
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM citus_disable_node_and_wait('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
|
|
|
@ -87,8 +87,14 @@ SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group;
|
|||
(1 row)
|
||||
|
||||
-- make sure when we disable a secondary we don't remove any placements
|
||||
SELECT master_disable_node('localhost', 9001);
|
||||
master_disable_node
|
||||
SELECT citus_disable_node('localhost', 9001);
|
||||
citus_disable_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
@ -247,8 +253,14 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
|||
(1 row)
|
||||
|
||||
-- try to disable the node before removing it (this used to crash)
|
||||
SELECT master_disable_node('localhost', :worker_2_port);
|
||||
master_disable_node
|
||||
SELECT citus_disable_node('localhost', :worker_2_port);
|
||||
citus_disable_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
@ -916,8 +928,8 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
|||
1
|
||||
(1 row)
|
||||
|
||||
-- test with master_disable_node
|
||||
-- status before master_disable_node
|
||||
-- test with citus_disable_node_and_wait
|
||||
-- status before citus_disable_node_and_wait
|
||||
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
|
@ -966,13 +978,19 @@ ORDER BY shardid ASC;
|
|||
(0 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SELECT master_disable_node('localhost', :worker_2_port);
|
||||
master_disable_node
|
||||
SELECT citus_disable_node('localhost', :worker_2_port);
|
||||
citus_disable_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- status after master_disable_node
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- status after citus_disable_node_and_wait
|
||||
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
@ -21,6 +21,14 @@ ERROR: cannot calculate the size because relation 'non_distributed_table' is no
|
|||
SELECT citus_total_relation_size('non_distributed_table');
|
||||
ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed
|
||||
DROP TABLE non_distributed_table;
|
||||
-- fix broken placements via disabling the node
|
||||
SET client_min_messages TO ERROR;
|
||||
SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
|
||||
replicate_table_shards
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Tests on distributed table with replication factor > 1
|
||||
VACUUM (FULL) lineitem_hash_part;
|
||||
SELECT citus_table_size('lineitem_hash_part');
|
||||
|
@ -67,7 +75,7 @@ SELECT citus_table_size('customer_copy_hash'),
|
|||
citus_table_size('supplier');
|
||||
citus_table_size | citus_table_size | citus_table_size
|
||||
---------------------------------------------------------------------
|
||||
548864 | 548864 | 401408
|
||||
548864 | 548864 | 425984
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX index_1 on customer_copy_hash(c_custkey);
|
||||
|
|
|
@ -53,3 +53,13 @@ CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix
CREATE OR REPLACE FUNCTION top_transaction_context_size() RETURNS BIGINT
    LANGUAGE C STRICT VOLATILE
    AS 'citus', $$top_transaction_context_size$$;
CREATE OR REPLACE FUNCTION pg_catalog.citus_disable_node_and_wait(nodename text, nodeport integer, force bool DEFAULT false)
RETURNS void
LANGUAGE plpgsql
AS $function$
BEGIN
    PERFORM pg_catalog.citus_disable_node(nodename, nodeport, force);
    PERFORM public.wait_until_metadata_sync(30000);
END;
$function$;
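The helper above simply chains the asynchronous disable with a bounded wait for metadata sync, so callers keep a synchronous call site. A usage sketch (worker ports as used throughout these tests; force defaults to false):

SELECT pg_catalog.citus_disable_node_and_wait('localhost', 57638);
SELECT pg_catalog.citus_disable_node_and_wait('localhost', 57637, force := true);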
@ -0,0 +1,132 @@
CREATE SCHEMA disable_node_with_replicated_tables;
SET search_path TO disable_node_with_replicated_tables;
SET citus.next_shard_id TO 101500;
SET citus.shard_replication_factor TO 2;
CREATE TABLE replicated(a int, b int);
SELECT create_distributed_table('replicated', 'a', shard_count:=2);
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE ref (a int, b int);
SELECT create_reference_table('ref');
create_reference_table
---------------------------------------------------------------------

(1 row)

INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- should successfully disable the node
SELECT citus_disable_node('localhost', :worker_2_port, true);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
citus_disable_node
---------------------------------------------------------------------

(1 row)

SELECT public.wait_until_metadata_sync();
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)

-- the placement should be removed both from the coordinator
-- and from the workers
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);
count
---------------------------------------------------------------------
3
(1 row)

\c - - - :worker_1_port
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);
count
---------------------------------------------------------------------
3
(1 row)
|
||||
|
||||
SET search_path TO disable_node_with_replicated_tables;
|
||||
-- should be able to ingest data from both the worker and the coordinator
|
||||
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
|
||||
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
|
||||
\c - - - :master_port
|
||||
SET search_path TO disable_node_with_replicated_tables;
|
||||
-- should be able to ingest data from both the worker and the coordinator
|
||||
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
|
||||
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
|
||||
-- now, query with round-robin policy such that
|
||||
-- each query should hit different replicas
|
||||
SET citus.task_assignment_policy to "round-robin";
|
||||
SELECT count(*) FROM ref;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
33
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM ref;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
33
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM replicated;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
33
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM replicated;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
33
|
||||
(1 row)
|
||||
|
||||
-- now, we should be able to replicate the shards back
|
||||
SET client_min_messages TO ERROR;
|
||||
SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT 1 FROM replicate_table_shards('replicated', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
RESET client_min_messages;
|
||||
-- should be able to ingest data from both the worker and the coordinator
|
||||
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
|
||||
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
|
||||
-- now, query with round-robin policy such that
|
||||
-- each query should hit different replicas
|
||||
SET citus.task_assignment_policy to "round-robin";
|
||||
SELECT count(*) FROM ref;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
44
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM ref;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
44
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM replicated;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
44
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM replicated;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
44
|
||||
(1 row)
|
||||
|
||||
SET client_min_messages TO ERROR;
|
||||
DROP SCHEMA disable_node_with_replicated_tables CASCADE;
|
|
@ -162,7 +162,7 @@ SELECT create_distributed_table('dist_table_test_2', 'a');
|
|||
|
||||
-- Mark tables as coordinator replicated in order to be able to test replicate_table_shards
|
||||
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
|
||||
('dist_table_test_2'::regclass);
|
||||
('dist_table_test_2'::regclass);
|
||||
-- replicate_table_shards should fail when the hostname GUC is set to a non-reachable node
|
||||
ALTER SYSTEM SET citus.local_hostname TO 'foobar';
|
||||
SELECT pg_reload_conf();
|
||||
|
@ -779,14 +779,10 @@ CREATE TABLE test_schema_support.nation_hash (
|
|||
n_regionkey integer not null,
|
||||
n_comment varchar(152)
|
||||
);
|
||||
SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash');
|
||||
master_create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 1);
|
||||
master_create_worker_shards
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
@ -797,18 +793,15 @@ CREATE TABLE test_schema_support.nation_hash2 (
|
|||
n_regionkey integer not null,
|
||||
n_comment varchar(152)
|
||||
);
|
||||
SELECT master_create_distributed_table('test_schema_support.nation_hash2', 'n_nationkey', 'hash');
|
||||
master_create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_create_worker_shards('test_schema_support.nation_hash2', 4, 1);
|
||||
master_create_worker_shards
|
||||
SELECT create_distributed_table('test_schema_support.nation_hash2', 'n_nationkey', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Mark tables as coordinator replicated in order to be able to test replicate_table_shards
|
||||
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
|
||||
('test_schema_support.nation_hash2'::regclass, 'test_schema_support.nation_hash'::regclass);
|
||||
-- Shard count before replication
|
||||
SELECT COUNT(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
|
@ -817,22 +810,22 @@ SELECT COUNT(*) FROM pg_dist_shard_placement;
|
|||
(1 row)
|
||||
|
||||
SET search_path TO public;
|
||||
SELECT replicate_table_shards('test_schema_support.nation_hash', shard_transfer_mode:='block_writes');
|
||||
SELECT replicate_table_shards('test_schema_support.nation_hash', shard_replication_factor:=2, max_shard_copies:=1, shard_transfer_mode:='block_writes');
|
||||
replicate_table_shards
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Confirm replication
|
||||
-- Confirm replication, both tables replicated due to colocation
|
||||
SELECT COUNT(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
12
|
||||
10
|
||||
(1 row)
|
||||
|
||||
-- Test with search_path is set
|
||||
SET search_path TO test_schema_support;
|
||||
SELECT replicate_table_shards('nation_hash2', shard_transfer_mode:='block_writes');
|
||||
SELECT replicate_table_shards('nation_hash2', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
|
||||
replicate_table_shards
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
@ -1073,14 +1066,14 @@ CALL citus_cleanup_orphaned_shards();
|
|||
select * from pg_dist_placement ORDER BY placementid;
|
||||
placementid | shardid | shardstate | shardlength | groupid
|
||||
---------------------------------------------------------------------
|
||||
151 | 123023 | 1 | 0 | 14
|
||||
154 | 123024 | 1 | 0 | 14
|
||||
157 | 123027 | 1 | 0 | 14
|
||||
158 | 123028 | 1 | 0 | 14
|
||||
159 | 123021 | 1 | 0 | 16
|
||||
160 | 123025 | 1 | 0 | 16
|
||||
161 | 123022 | 1 | 0 | 16
|
||||
162 | 123026 | 1 | 0 | 16
|
||||
150 | 123023 | 1 | 0 | 14
|
||||
153 | 123024 | 1 | 0 | 14
|
||||
156 | 123027 | 1 | 0 | 14
|
||||
157 | 123028 | 1 | 0 | 14
|
||||
158 | 123021 | 1 | 0 | 16
|
||||
159 | 123025 | 1 | 0 | 16
|
||||
160 | 123022 | 1 | 0 | 16
|
||||
161 | 123026 | 1 | 0 | 16
|
||||
(8 rows)
|
||||
|
||||
-- Move all shards to worker1 again
|
||||
|
@ -2042,6 +2035,12 @@ SELECT 1 FROM master_remove_node('localhost', :master_port);
|
|||
1
|
||||
(1 row)
|
||||
|
||||
SELECT public.wait_until_metadata_sync(30000);
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
--
|
||||
-- Make sure that rebalance_table_shards() and replicate_table_shards() replicate
|
||||
-- reference tables to the coordinator when replicate_reference_tables_on_activate
|
||||
|
@ -2082,7 +2081,7 @@ SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shar
|
|||
ERROR: Table 'dist_table_test_3' is streaming replicated. Shards of streaming replicated tables cannot be copied
|
||||
-- Mark table as coordinator replicated in order to be able to test replicate_table_shards
|
||||
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
|
||||
('dist_table_test_3'::regclass);
|
||||
('dist_table_test_3'::regclass);
|
||||
SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shard_transfer_mode:='block_writes');
|
||||
replicate_table_shards
|
||||
---------------------------------------------------------------------
|
||||
|
@ -2101,6 +2100,12 @@ SELECT 1 FROM master_remove_node('localhost', :master_port);
|
|||
1
|
||||
(1 row)
|
||||
|
||||
SELECT public.wait_until_metadata_sync(30000);
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE rebalance_test_table(int_column int);
|
||||
SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append');
|
||||
master_create_distributed_table
|
||||
|
@ -2141,6 +2146,12 @@ SELECT 1 FROM master_remove_node('localhost', :master_port);
|
|||
1
|
||||
(1 row)
|
||||
|
||||
SELECT public.wait_until_metadata_sync(30000);
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- reference table 2 will not have a replica identity, causing the rebalancer to not work
|
||||
-- when ran in the default mode. Instead we need to change the shard transfer mode to make
|
||||
-- it work. This verifies the shard transfer mode used in the rebalancer is used for the
|
||||
|
@ -2156,6 +2167,12 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port);
|
|||
1
|
||||
(1 row)
|
||||
|
||||
SELECT public.wait_until_metadata_sync(30000);
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('t1','a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
@ -2207,6 +2224,12 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port);
|
|||
1
|
||||
(1 row)
|
||||
|
||||
SELECT public.wait_until_metadata_sync(30000);
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE r1 (a int PRIMARY KEY, b int);
|
||||
SELECT create_reference_table('r1');
|
||||
create_reference_table
|
||||
|
@ -2260,6 +2283,12 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port);
|
|||
1
|
||||
(1 row)
|
||||
|
||||
SELECT public.wait_until_metadata_sync(30000);
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE t1 (a int PRIMARY KEY, b int);
|
||||
CREATE TABLE r1 (a int PRIMARY KEY, b int);
|
||||
SELECT create_distributed_table('t1', 'a');
|
||||
|
@ -2295,7 +2324,7 @@ SELECT replicate_table_shards('t1', shard_replication_factor := 2);
|
|||
ERROR: Table 't1' is streaming replicated. Shards of streaming replicated tables cannot be copied
|
||||
-- Mark table as coordinator replicated in order to be able to test replicate_table_shards
|
||||
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
|
||||
('t1'::regclass);
|
||||
('t1'::regclass);
|
||||
SELECT replicate_table_shards('t1', shard_replication_factor := 2);
|
||||
replicate_table_shards
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
@ -17,9 +17,8 @@ SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
(1 row)

-- coordinator cannot be disabled
SELECT 1 FROM master_disable_node('localhost', :master_port);
ERROR: Disabling localhost:xxxxx failed
DETAIL: cannot change "isactive" field of the coordinator node
SELECT 1 FROM citus_disable_node('localhost', :master_port);
ERROR: cannot change "isactive" field of the coordinator node
RESET client_min_messages;
SELECT 1 FROM master_remove_node('localhost', :master_port);
?column?
|
||||
|
|
|
@ -405,45 +405,6 @@ SELECT hasmetadata, metadatasynced, shouldhaveshards FROM pg_dist_node WHERE nod
|
|||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- verify that mx workers are updated when disabling/activating nodes
|
||||
SELECT citus_disable_node('localhost', :worker_1_port);
|
||||
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back.
|
||||
citus_disable_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
start_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SELECT nodeport, isactive FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;
|
||||
nodeport | isactive
|
||||
---------------------------------------------------------------------
|
||||
57637 | f
|
||||
57638 | t
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET client_min_messages TO ERROR;
|
||||
SELECT citus_activate_node('localhost', :worker_1_port);
|
||||
citus_activate_node
|
||||
---------------------------------------------------------------------
|
||||
16
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SELECT nodeport, isactive FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;
|
||||
nodeport | isactive
|
||||
---------------------------------------------------------------------
|
||||
57637 | t
|
||||
57638 | t
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
NOTICE: dropping metadata on the node (localhost,57638)
|
||||
|
|
|
@ -42,7 +42,7 @@ ORDER BY 1;
function citus_conninfo_cache_invalidate()
function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
function citus_create_restore_point(text)
function citus_disable_node(text,integer)
function citus_disable_node(text,integer,boolean)
function citus_dist_local_group_cache_invalidate()
function citus_dist_node_cache_invalidate()
function citus_dist_object_cache_invalidate()
|
|
|
@ -17,12 +17,8 @@ CREATE TABLE customer_copy_hash (
|
|||
c_mktsegment char(10),
|
||||
c_comment varchar(117),
|
||||
primary key (c_custkey));
|
||||
SELECT master_create_distributed_table('customer_copy_hash', 'c_custkey', 'hash');
|
||||
|
||||
-- Test COPY into empty hash-partitioned table
|
||||
COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|');
|
||||
|
||||
SELECT master_create_worker_shards('customer_copy_hash', 64, 1);
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT create_distributed_table('customer_copy_hash', 'c_custkey', shard_count:=64);
|
||||
|
||||
-- Test empty copy
|
||||
COPY customer_copy_hash FROM STDIN;
|
||||
|
@ -123,10 +119,8 @@ CREATE TABLE customer_with_default(
|
|||
c_custkey integer,
|
||||
c_name varchar(25) not null,
|
||||
c_time timestamp default now());
|
||||
|
||||
SELECT master_create_distributed_table('customer_with_default', 'c_custkey', 'hash');
|
||||
|
||||
SELECT master_create_worker_shards('customer_with_default', 64, 1);
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT create_distributed_table('customer_with_default', 'c_custkey', shard_count:=64);
|
||||
|
||||
-- Test with default values for now() function
|
||||
COPY customer_with_default (c_custkey, c_name) FROM STDIN
|
||||
|
@ -221,6 +215,7 @@ CREATE TABLE customer_copy_append (
|
|||
c_mktsegment char(10),
|
||||
c_comment varchar(117));
|
||||
SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append');
|
||||
SET citus.shard_replication_factor TO 2;
|
||||
|
||||
-- Test syntax error
|
||||
BEGIN;
|
||||
|
@ -444,6 +439,7 @@ COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_s
|
|||
|
||||
|
||||
-- Test copy on append distributed tables do not create shards on removed workers
|
||||
SET citus.shard_replication_factor TO 2;
|
||||
CREATE TABLE numbers_append (a int, b int);
|
||||
SELECT create_distributed_table('numbers_append', 'a', 'append');
|
||||
|
||||
|
@ -470,7 +466,14 @@ SELECT shardid, nodename, nodeport
|
|||
WHERE logicalrelid = 'numbers_append'::regclass order by placementid;
|
||||
|
||||
-- disable the first node
|
||||
SET client_min_messages TO ERROR;
|
||||
\set VERBOSITY terse
|
||||
SELECT master_disable_node('localhost', :worker_1_port);
|
||||
SELECT public.wait_until_metadata_sync(30000);
|
||||
|
||||
RESET client_min_messages;
|
||||
\set VERBOSITY default
|
||||
|
||||
-- set replication factor to 1 so that copy will
|
||||
-- succeed without replication count error
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
@ -525,7 +528,7 @@ SELECT * FROM run_command_on_workers('CREATE USER test_user');
|
|||
\c - test_user
|
||||
SET citus.shard_count to 4;
|
||||
CREATE TABLE numbers_hash (a int, b int);
|
||||
SELECT create_distributed_table('numbers_hash', 'a');
|
||||
SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');
|
||||
|
||||
COPY numbers_hash FROM STDIN WITH (FORMAT 'csv');
|
||||
1,1
|
||||
|
@ -553,7 +556,7 @@ COPY numbers_reference FROM STDIN WITH (FORMAT 'csv');
|
|||
|
||||
-- create another hash distributed table
|
||||
CREATE TABLE numbers_hash_other(a int, b int);
|
||||
SELECT create_distributed_table('numbers_hash_other', 'a');
|
||||
SELECT create_distributed_table('numbers_hash_other', 'a', colocate_with:='numbers_hash');
|
||||
SELECT shardid, shardstate, nodename, nodeport
|
||||
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
|
||||
WHERE logicalrelid = 'numbers_hash_other'::regclass order by shardid, nodeport;
|
||||
|
@ -631,8 +634,9 @@ DROP TABLE numbers_reference;
|
|||
-- test copy failure inside the node
|
||||
-- it will be done by changing definition of a shard table
|
||||
SET citus.shard_count to 4;
|
||||
SET citus.next_shard_id TO 560170;
|
||||
CREATE TABLE numbers_hash(a int, b int);
|
||||
SELECT create_distributed_table('numbers_hash', 'a');
|
||||
SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');
|
||||
|
||||
\c - - - :worker_1_port
|
||||
ALTER TABLE numbers_hash_560170 DROP COLUMN b;
|
||||
|
|
|

@ -2,8 +2,8 @@ test: isolation_add_remove_node
test: isolation_turn_mx_off
test: isolation_update_node
test: isolation_update_node_lock_writes
test: isolation_ensure_dependency_activate_node
test: isolation_turn_mx_on
test: isolation_ensure_dependency_activate_node
test: isolation_add_node_vs_reference_table_operations
test: isolation_create_table_vs_add_remove_node
test: isolation_master_update_node

@ -20,6 +20,7 @@ test: multi_extension
test: single_node
test: single_node_truncate
test: turn_mx_on
test: multi_test_helpers multi_test_helpers_superuser
test: multi_cluster_management

# below tests are placed right after multi_cluster_management as we do

@ -30,7 +31,6 @@ test: escape_extension_name
test: ref_citus_local_fkeys
test: alter_database_owner

test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
test: check_mx
test: turn_mx_off

@ -16,8 +16,8 @@
test: turn_mx_off
test: multi_extension
test: multi_test_helpers multi_test_helpers_superuser
test: multi_mx_node_metadata
test: turn_mx_on
test: multi_mx_node_metadata
test: multi_cluster_management
test: multi_mx_function_table_reference
test: multi_test_catalog_views

@ -1,6 +1,7 @@
test: multi_cluster_management
test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw
test: multi_cluster_management
test: multi_test_catalog_views
test: replicated_table_disable_node

# ----------
# The following distributed tests depend on creating a partitioned table and

@ -1,9 +1,10 @@
test: multi_cluster_management
test: turn_mx_off
test: multi_test_helpers multi_test_helpers_superuser
test: multi_cluster_management
test: multi_test_catalog_views
test: shard_rebalancer_unit
test: turn_mx_off
test: shard_rebalancer
test: turn_mx_on
test: foreign_key_to_reference_shard_rebalance
test: multi_move_mx
test: shard_move_deferred_delete

@ -13,19 +13,9 @@ CREATE TABLE customer_copy_hash (
c_mktsegment char(10),
c_comment varchar(117),
primary key (c_custkey));
SELECT master_create_distributed_table('customer_copy_hash', 'c_custkey', 'hash');
master_create_distributed_table
---------------------------------------------------------------------

(1 row)

-- Test COPY into empty hash-partitioned table
COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|');
ERROR: could not find any shards into which to copy
DETAIL: No shards exist for distributed table "customer_copy_hash".
HINT: Run master_create_worker_shards to create shards and try again.
SELECT master_create_worker_shards('customer_copy_hash', 64, 1);
master_create_worker_shards
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('customer_copy_hash', 'c_custkey', shard_count:=64);
create_distributed_table
---------------------------------------------------------------------

(1 row)

@ -146,14 +136,9 @@ CREATE TABLE customer_with_default(
c_custkey integer,
c_name varchar(25) not null,
c_time timestamp default now());
SELECT master_create_distributed_table('customer_with_default', 'c_custkey', 'hash');
master_create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT master_create_worker_shards('customer_with_default', 64, 1);
master_create_worker_shards
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('customer_with_default', 'c_custkey', shard_count:=64);
create_distributed_table
---------------------------------------------------------------------

(1 row)

@ -246,8 +231,7 @@ SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_sh
shardid | shardlength
---------------------------------------------------------------------
560129 | 0
560129 | 0
(2 rows)
(1 row)

-- Update shard statistics for range-partitioned shard
SELECT citus_update_shard_statistics(:new_shard_id);

@ -260,8 +244,7 @@ SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_sh
shardid | shardlength
---------------------------------------------------------------------
560129 | 131072
560129 | 131072
(2 rows)
(1 row)

-- Create a new append-partitioned table into which to COPY
CREATE TABLE customer_copy_append (

@ -279,6 +262,7 @@ SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append');

(1 row)

SET citus.shard_replication_factor TO 2;
-- Test syntax error
BEGIN;
SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset

@ -574,6 +558,7 @@ SELECT create_distributed_table('composite_partition_column_table', 'composite_c
SELECT master_create_empty_shard('composite_partition_column_table') AS shardid \gset
COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
-- Test copy on append distributed tables do not create shards on removed workers
SET citus.shard_replication_factor TO 2;
CREATE TABLE numbers_append (a int, b int);
SELECT create_distributed_table('numbers_append', 'a', 'append');
create_distributed_table

@ -606,13 +591,18 @@ SELECT shardid, nodename, nodeport
(4 rows)

-- disable the first node
SET client_min_messages TO ERROR;
\set VERBOSITY terse
SELECT master_disable_node('localhost', :worker_1_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back.
master_disable_node
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)

RESET client_min_messages;
\set VERBOSITY default
-- set replication factor to 1 so that copy will
-- succeed without replication count error
SET citus.shard_replication_factor TO 1;

@ -630,7 +620,7 @@ SELECT shardid, nodename, nodeport
560155 | localhost | 57638
560156 | localhost | 57638
560156 | localhost | 57637
560157 | localhost | 57638
560157 | localhost | 57637
560158 | localhost | 57638
(6 rows)

@ -658,7 +648,7 @@ SELECT shardid, nodename, nodeport
560155 | localhost | 57638
560156 | localhost | 57638
560156 | localhost | 57637
560157 | localhost | 57638
560157 | localhost | 57637
560158 | localhost | 57638
560159 | localhost | 57637
560159 | localhost | 57638

@ -682,7 +672,7 @@ SELECT * FROM run_command_on_workers('CREATE USER test_user');
\c - test_user
SET citus.shard_count to 4;
CREATE TABLE numbers_hash (a int, b int);
SELECT create_distributed_table('numbers_hash', 'a');
SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

@ -716,7 +706,7 @@ SELECT create_reference_table('numbers_reference');
COPY numbers_reference FROM STDIN WITH (FORMAT 'csv');
-- create another hash distributed table
CREATE TABLE numbers_hash_other(a int, b int);
SELECT create_distributed_table('numbers_hash_other', 'a');
SELECT create_distributed_table('numbers_hash_other', 'a', colocate_with:='numbers_hash');
create_distributed_table
---------------------------------------------------------------------

@ -772,7 +762,7 @@ SELECT shardid, shardstate, nodename, nodeport
-- try to insert into a reference table copy should fail
COPY numbers_reference FROM STDIN WITH (FORMAT 'csv');
ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in
CONTEXT: COPY numbers_reference, line 1: "3,1"
CONTEXT: COPY numbers_reference, line 1: "3,1"
-- verify shards for reference table are still valid
SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)

@ -817,8 +807,9 @@ DROP TABLE numbers_reference;
-- test copy failure inside the node
-- it will be done by changing definition of a shard table
SET citus.shard_count to 4;
SET citus.next_shard_id TO 560170;
CREATE TABLE numbers_hash(a int, b int);
SELECT create_distributed_table('numbers_hash', 'a');
SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

@ -1,6 +1,10 @@
setup
{
SELECT 1;
CREATE OR REPLACE FUNCTION public.wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';
}

teardown

@ -38,6 +42,7 @@ step "s1-activate-node-1"
step "s1-disable-node-1"
{
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT public.wait_until_metadata_sync();
}

step "s1-remove-node-1"

@ -80,6 +85,7 @@ step "s2-activate-node-1"
step "s2-disable-node-1"
{
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT public.wait_until_metadata_sync();
}

step "s2-remove-node-1"

@ -128,7 +134,5 @@ permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-activate-node
// activate and disable an inactive node node from 2 transactions, should be ok
permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes"

// activate and disable an inactive node from 2 transactions, one aborts
permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes"
// disable an active node from 2 transactions, one aborts
permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes"

@ -25,6 +25,7 @@ session "s1"
step "s1-begin"
{
BEGIN;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
}

step "s1-update"

@ -42,6 +43,7 @@ session "s2"
step "s2-begin"
{
BEGIN;
SELECT assign_distributed_transaction_id(0, 9, '2021-07-09 15:41:55.542377+02');
}

step "s2-update"

@ -59,6 +61,7 @@ session "s3"
step "s3-begin"
{
BEGIN;
SELECT assign_distributed_transaction_id(0, 10, '2021-07-09 15:41:55.542377+02');
}

step "s3-update"

@ -2,10 +2,10 @@
// add single one of the nodes for the purpose of the test
setup
{
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER)
CREATE OR REPLACE FUNCTION public.wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$wait_until_metadata_sync$$;
LANGUAGE C STRICT
AS 'citus';

SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
SELECT 1 FROM master_add_node('localhost', 57637);

@ -17,12 +17,6 @@ teardown
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('localhost', 57638);

-- schema drops are not cascaded
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
DROP SCHEMA IF EXISTS myschema CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
DROP SCHEMA IF EXISTS myschema2 CASCADE;

RESET search_path;
DROP TABLE IF EXISTS t1 CASCADE;
DROP TABLE IF EXISTS t2 CASCADE;

@ -182,34 +176,52 @@ step "s3-commit"
COMMIT;
}

step "s3-drop-coordinator-schemas"
{
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
}

step "s3-drop-worker-schemas"
{
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
}

// schema only tests
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"

// concurrency tests with multi schema distribution
permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-table" "s3-use-schema" "s3-create-table" "s1-commit" "s2-commit" "s3-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s3-commit" "s2-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s3-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"

// type and schema tests
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"

// distributed function tests
// isolation tests are not very simple psql, so trigger NOTIFY reliably for
// s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by
// "s2-commit", because "COMMIT" syncs the messages

permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"

// we cannot run the following operations concurrently
// the problem is that NOTIFY event doesn't (reliably) happen before COMMIT
// so we have to commit s2 before s1 starts
permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"

@ -3,13 +3,13 @@ setup
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('localhost', 57638);

SELECT nodeid, nodename, nodeport from pg_dist_node;
SELECT nodeid, nodename, nodeport from pg_dist_node ORDER BY 1 DESC;
}

teardown
{
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
SELECT nodeid, nodename, nodeport from pg_dist_node;
SELECT nodeid, nodename, nodeport from pg_dist_node ORDER BY 1 DESC;
}

session "s1"

@ -1,7 +1,7 @@
--
-- failure_add_disable_node tests master_add_node, master_remove_node
-- master_activate_node for failures.
-- master_disable_node and master_add_inactive_node can not be
-- citus_disable_node_and_wait and master_add_inactive_node can not be
-- tested as they don't create network activity
--

@ -30,7 +30,8 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
WHERE s.logicalrelid = 'user_table'::regclass
ORDER BY placementid;

SELECT master_disable_node('localhost', :worker_2_proxy_port);
SELECT citus_disable_node('localhost', :worker_2_proxy_port, true);
SELECT public.wait_until_metadata_sync();

SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;

@ -6,6 +6,11 @@ ALTER SYSTEM SET citus.recover_2pc_interval TO -1;
ALTER SYSTEM set citus.enable_statistics_collection TO false;
SELECT pg_reload_conf();

CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';

-- Add some helper functions for sending commands to mitmproxy

CREATE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$

@ -24,9 +24,11 @@ SELECT master_remove_node('localhost', :worker_2_port);
-- verify that the node has been deleted
SELECT master_get_active_worker_nodes();

-- try to disable a node with no placements see that node is removed
-- try to disable a node with no placements see that node is s=removed
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT master_disable_node('localhost', :worker_2_port);

SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync();
SELECT master_get_active_worker_nodes();

-- add some shard placements to the cluster

@ -48,6 +50,7 @@ TRUNCATE pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000;

SELECT * FROM citus_activate_node('localhost', :worker_2_port);

CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');

@ -58,7 +61,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_get_active_worker_nodes();

-- insert a row so that master_disable_node() exercises closing connections
-- insert a row so that citus_disable_node() exercises closing connections
CREATE TABLE test_reference_table (y int primary key, name text);
SELECT create_reference_table('test_reference_table');
INSERT INTO test_reference_table VALUES (1, '1');

@ -66,16 +69,19 @@ INSERT INTO test_reference_table VALUES (1, '1');
-- try to remove a node with active placements and reference tables
SELECT citus_remove_node('localhost', :worker_2_port);

-- try to disable a node with active placements see that node is removed
-- observe that a notification is displayed
SELECT master_disable_node('localhost', :worker_2_port);
-- try to disable a node with active placements
-- which should fail because there are some placements
-- which are the only placements for a given shard
SELECT citus_disable_node('localhost', :worker_2_port);

SELECT master_get_active_worker_nodes();

-- try to disable a node which does not exist and see that an error is thrown
SELECT master_disable_node('localhost.noexist', 2345);
SELECT citus_disable_node('localhost.noexist', 2345);

-- drop the table without leaving a shard placement behind (messes up other tests)
SELECT master_activate_node('localhost', :worker_2_port);

DROP TABLE test_reference_table, cluster_management_test;

-- create users like this so results of community and enterprise are same

@ -90,7 +96,8 @@ GRANT EXECUTE ON FUNCTION master_activate_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_secondary_node(text,int,text,int,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_disable_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION citus_disable_node(text,int,bool) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION citus_disable_node_and_wait(text,int,bool) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_remove_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_metadata_user;

@ -107,7 +114,7 @@ DELETE FROM citus.pg_dist_object WHERE objid = 'public'::regnamespace::oid;
SET ROLE non_super_user;
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
SELECT 1 FROM citus_disable_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);

@ -119,7 +126,6 @@ SET citus.enable_object_propagation TO off; -- prevent master activate node to a
BEGIN;
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port);
SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);

@ -316,7 +322,8 @@ SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_g
-- check that you can add a seconary to a non-default cluster, and activate it, and remove it
SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary');
SELECT master_activate_node('localhost', 9999);
SELECT master_disable_node('localhost', 9999);
SELECT citus_disable_node('localhost', 9999);
SELECT public.wait_until_metadata_sync();
SELECT master_remove_node('localhost', 9999);

-- check that you can't manually add two primaries to a group

@ -745,8 +745,8 @@ SELECT create_reference_table('dist_table_2');
ALTER TABLE dist_table_1 ADD COLUMN b int;

SELECT master_add_node('localhost', :master_port, groupid => 0);
SELECT master_disable_node('localhost', :worker_1_port);
SELECT master_disable_node('localhost', :worker_2_port);
SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
SELECT citus_disable_node_and_wait('localhost', :worker_2_port);
SELECT master_remove_node('localhost', :worker_1_port);
SELECT master_remove_node('localhost', :worker_2_port);

@ -98,7 +98,6 @@ SELECT r.a FROM ref r JOIN local_table lt on r.a = lt.a;

\c - - - :master_port
SET search_path TO mx_add_coordinator,public;

SELECT stop_metadata_sync_to_node('localhost', :master_port);

SELECT * FROM ref ORDER BY a;

@ -117,7 +116,6 @@ SELECT create_reference_table('referece_table');

CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('distributed_table', 'id');

INSERT INTO local_fkey_table SELECT i FROM generate_Series(0,100)i;
INSERT INTO referece_table SELECT i FROM generate_Series(0,100)i;
INSERT INTO distributed_table SELECT i, i FROM generate_Series(0,100)i;

@ -154,8 +152,8 @@ alter table local_fkey_table ALTER COLUMN id TYPE int;
SET citus.force_max_query_parallelization TO ON;
alter table distributed_table ALTER COLUMN value_1 TYPE bigint;
alter table distributed_table ALTER COLUMN value_1 TYPE int;
SET client_min_messages TO error;

SET client_min_messages TO ERROR;
DROP TABLE distributed_table, referece_table, local_fkey_table;
SELECT master_remove_node('localhost', :master_port);

@ -286,14 +286,14 @@ SELECT verify_metadata('localhost', :worker_1_port),
-- Don't drop the reference table so it has shards on the nodes being disabled
DROP TABLE dist_table_1, dist_table_2;

SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
SELECT pg_catalog.citus_disable_node_and_wait('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port);

SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port);

------------------------------------------------------------------------------------
-- Test master_disable_node() when the node that is being disabled is actually down
-- Test citus_disable_node_and_wait() when the node that is being disabled is actually down
------------------------------------------------------------------------------------
SELECT master_update_node(:nodeid_2, 'localhost', 1);
SELECT wait_until_metadata_sync(30000);

@ -301,12 +301,9 @@ SELECT wait_until_metadata_sync(30000);
-- set metadatasynced so we try porpagating metadata changes
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2);

-- should not error out, master_disable_node is tolerant for node failures
SELECT 1 FROM master_disable_node('localhost', 1);

-- try again after stopping metadata sync
SELECT stop_metadata_sync_to_node('localhost', 1);
SELECT 1 FROM master_disable_node('localhost', 1);
-- should not error out, citus_disable_node is tolerant for node failures
-- but we should not wait metadata syncing to finish as this node is down
SELECT 1 FROM citus_disable_node('localhost', 1, true);

SELECT verify_metadata('localhost', :worker_1_port);

@ -317,7 +314,7 @@ SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port);

------------------------------------------------------------------------------------
-- Test master_disable_node() when the other node is down
-- Test citus_disable_node_and_wait() when the other node is down
------------------------------------------------------------------------------------
-- node 1 is down.
SELECT master_update_node(:nodeid_1, 'localhost', 1);

@ -326,12 +323,13 @@ SELECT wait_until_metadata_sync(30000);
-- set metadatasynced so we try porpagating metadata changes
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2);

-- should error out
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
-- should not error out, citus_disable_node is tolerant for node failures
-- but we should not wait metadata syncing to finish as this node is down
SELECT 1 FROM citus_disable_node('localhost', :worker_2_port);

-- try again after stopping metadata sync
SELECT stop_metadata_sync_to_node('localhost', 1);
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
SELECT 1 FROM citus_disable_node_and_wait('localhost', :worker_2_port);

-- bring up node 1
SELECT master_update_node(:nodeid_1, 'localhost', :worker_1_port);

@ -48,7 +48,8 @@ SELECT create_reference_table('remove_node_reference_table');
SELECT 1 FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary');
SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group;
-- make sure when we disable a secondary we don't remove any placements
SELECT master_disable_node('localhost', 9001);
SELECT citus_disable_node('localhost', 9001);
SELECT public.wait_until_metadata_sync();
SELECT isactive FROM pg_dist_node WHERE nodeport = 9001;
SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group;
-- make sure when we activate a secondary we don't add any placements

@ -130,7 +131,8 @@ SELECT master_remove_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);

-- try to disable the node before removing it (this used to crash)
SELECT master_disable_node('localhost', :worker_2_port);
SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync();
SELECT master_remove_node('localhost', :worker_2_port);

-- re-add the node for the next test

@ -544,9 +546,9 @@ SET citus.replicate_reference_tables_on_activate TO off;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);

-- test with master_disable_node
-- test with citus_disable_node_and_wait

-- status before master_disable_node
-- status before citus_disable_node_and_wait
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

SELECT

@ -579,9 +581,10 @@ ORDER BY shardid ASC;

\c - - - :master_port

SELECT master_disable_node('localhost', :worker_2_port);
SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync();

-- status after master_disable_node
-- status after citus_disable_node_and_wait
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

SELECT

@ -19,6 +19,10 @@ SELECT citus_relation_size('non_distributed_table');
SELECT citus_total_relation_size('non_distributed_table');
DROP TABLE non_distributed_table;

-- fix broken placements via disabling the node
SET client_min_messages TO ERROR;
SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2, shard_transfer_mode:='block_writes');

-- Tests on distributed table with replication factor > 1
VACUUM (FULL) lineitem_hash_part;

@ -134,3 +134,4 @@ BEGIN
END LOOP;
END;
$$ LANGUAGE plpgsql;

@ -54,3 +54,14 @@ CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix
CREATE OR REPLACE FUNCTION top_transaction_context_size() RETURNS BIGINT
LANGUAGE C STRICT VOLATILE
AS 'citus', $$top_transaction_context_size$$;

CREATE OR REPLACE FUNCTION pg_catalog.citus_disable_node_and_wait(nodename text, nodeport integer, force bool DEFAULT false)
RETURNS void
LANGUAGE plpgsql
AS $function$
BEGIN

PERFORM pg_catalog.citus_disable_node(nodename, nodeport, force);
PERFORM public.wait_until_metadata_sync(30000);
END;
$function$;
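
The test helper above wraps the disable call and the metadata-sync wait into one step so test files do not have to repeat both statements. A usage sketch, with the port taken from the regression-suite psql variables:

-- disable a worker and block until the change is visible on the remaining nodes
SELECT pg_catalog.citus_disable_node_and_wait('localhost', :worker_2_port);
-- force := true is the pattern used when the node being disabled is unreachable
SELECT pg_catalog.citus_disable_node_and_wait('localhost', :worker_2_port, force := true);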

@ -0,0 +1,69 @@
CREATE SCHEMA disable_node_with_replicated_tables;
SET search_path TO disable_node_with_replicated_tables;

SET citus.next_shard_id TO 101500;

SET citus.shard_replication_factor TO 2;
CREATE TABLE replicated(a int, b int);
SELECT create_distributed_table('replicated', 'a', shard_count:=2);

CREATE TABLE ref (a int, b int);
SELECT create_reference_table('ref');

INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;

-- should be successfully disable node
SELECT citus_disable_node('localhost', :worker_2_port, true);
SELECT public.wait_until_metadata_sync();

-- the placement should be removed both from the coordinator
-- and from the workers
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);

\c - - - :worker_1_port
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);
SET search_path TO disable_node_with_replicated_tables;

-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;

\c - - - :master_port
SET search_path TO disable_node_with_replicated_tables;

-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;

-- now, query with round-robin policy such that
-- each query should hit different replicas
SET citus.task_assignment_policy to "round-robin";
SELECT count(*) FROM ref;
SELECT count(*) FROM ref;

SELECT count(*) FROM replicated;
SELECT count(*) FROM replicated;

-- now, we should be able to replicate the shards back
SET client_min_messages TO ERROR;
SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
SELECT 1 FROM replicate_table_shards('replicated', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
RESET client_min_messages;

-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;

-- now, query with round-robin policy such that
-- each query should hit different replicas
SET citus.task_assignment_policy to "round-robin";
SELECT count(*) FROM ref;
SELECT count(*) FROM ref;

SELECT count(*) FROM replicated;
SELECT count(*) FROM replicated;

SET client_min_messages TO ERROR;
DROP SCHEMA disable_node_with_replicated_tables CASCADE;
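
As a side note, a quick way to sanity-check the node state that this new test drives (a sketch; run from the coordinator with the regression-suite port variables):

-- the disabled worker is expected to show isactive = f until citus_activate_node runs
SELECT nodename, nodeport, isactive FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;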

@ -82,7 +82,7 @@ SELECT create_distributed_table('dist_table_test_2', 'a');

-- Mark tables as coordinator replicated in order to be able to test replicate_table_shards
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('dist_table_test_2'::regclass);
('dist_table_test_2'::regclass);

-- replicate_table_shards should fail when the hostname GUC is set to a non-reachable node
ALTER SYSTEM SET citus.local_hostname TO 'foobar';

@ -550,8 +550,9 @@ CREATE TABLE test_schema_support.nation_hash (
n_comment varchar(152)
);

SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 1);
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash');

CREATE TABLE test_schema_support.nation_hash2 (
n_nationkey integer not null,

@ -560,21 +561,24 @@ CREATE TABLE test_schema_support.nation_hash2 (
n_comment varchar(152)
);

SELECT master_create_distributed_table('test_schema_support.nation_hash2', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('test_schema_support.nation_hash2', 4, 1);
SELECT create_distributed_table('test_schema_support.nation_hash2', 'n_nationkey', 'hash');

-- Mark tables as coordinator replicated in order to be able to test replicate_table_shards
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('test_schema_support.nation_hash2'::regclass, 'test_schema_support.nation_hash'::regclass);

-- Shard count before replication
SELECT COUNT(*) FROM pg_dist_shard_placement;

SET search_path TO public;
SELECT replicate_table_shards('test_schema_support.nation_hash', shard_transfer_mode:='block_writes');
SELECT replicate_table_shards('test_schema_support.nation_hash', shard_replication_factor:=2, max_shard_copies:=1, shard_transfer_mode:='block_writes');

-- Confirm replication
-- Confirm replication, both tables replicated due to colocation
SELECT COUNT(*) FROM pg_dist_shard_placement;

-- Test with search_path is set
SET search_path TO test_schema_support;
SELECT replicate_table_shards('nation_hash2', shard_transfer_mode:='block_writes');
SELECT replicate_table_shards('nation_hash2', shard_replication_factor:=2, shard_transfer_mode:='block_writes');

-- Confirm replication
SELECT COUNT(*) FROM pg_dist_shard_placement;

@ -1229,7 +1233,7 @@ DROP TABLE tab;

-- we don't need the coordinator on pg_dist_node anymore
SELECT 1 FROM master_remove_node('localhost', :master_port);

SELECT public.wait_until_metadata_sync(30000);

--
-- Make sure that rebalance_table_shards() and replicate_table_shards() replicate

@ -1258,13 +1262,14 @@ SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shar

-- Mark table as coordinator replicated in order to be able to test replicate_table_shards
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('dist_table_test_3'::regclass);
('dist_table_test_3'::regclass);

SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shard_transfer_mode:='block_writes');

SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;

SELECT 1 FROM master_remove_node('localhost', :master_port);
SELECT public.wait_until_metadata_sync(30000);

CREATE TABLE rebalance_test_table(int_column int);
SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append');

@ -1283,6 +1288,8 @@ SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE lo
DROP TABLE dist_table_test_3, rebalance_test_table, ref_table;

SELECT 1 FROM master_remove_node('localhost', :master_port);
SELECT public.wait_until_metadata_sync(30000);

-- reference table 2 will not have a replica identity, causing the rebalancer to not work
-- when ran in the default mode. Instead we need to change the shard transfer mode to make

@ -1297,6 +1304,7 @@ CREATE TABLE r2 (a int, b int);
-- node without the reference tables

SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync(30000);

SELECT create_distributed_table('t1','a');
SELECT create_reference_table('r1');

@ -1321,6 +1329,7 @@ SELECT count(*) FROM pg_dist_partition;
-- executing the rebalancer

SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync(30000);

CREATE TABLE r1 (a int PRIMARY KEY, b int);
SELECT create_reference_table('r1');

@ -1353,6 +1362,7 @@ DROP TABLE r1;
-- fail.

SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync(30000);

CREATE TABLE t1 (a int PRIMARY KEY, b int);
CREATE TABLE r1 (a int PRIMARY KEY, b int);

@ -1372,7 +1382,7 @@ SELECT replicate_table_shards('t1', shard_replication_factor := 2);

-- Mark table as coordinator replicated in order to be able to test replicate_table_shards
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('t1'::regclass);
('t1'::regclass);
SELECT replicate_table_shards('t1', shard_replication_factor := 2);

-- verify the reference table is on all nodes after replicate_table_shards

@ -1382,3 +1392,4 @@ JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'r1'::regclass;

DROP TABLE t1, r1;

@ -15,7 +15,7 @@ SET client_min_messages TO WARNING;
SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);

-- coordinator cannot be disabled
SELECT 1 FROM master_disable_node('localhost', :master_port);
SELECT 1 FROM citus_disable_node('localhost', :master_port);

RESET client_min_messages;

@ -180,21 +180,6 @@ SELECT hasmetadata, metadatasynced, shouldhaveshards FROM pg_dist_node WHERE nod
\c - - - :worker_2_port
SELECT hasmetadata, metadatasynced, shouldhaveshards FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;

\c - - - :master_port
-- verify that mx workers are updated when disabling/activating nodes
SELECT citus_disable_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);

\c - - - :worker_2_port
SELECT nodeport, isactive FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;

\c - - - :master_port
SET client_min_messages TO ERROR;
SELECT citus_activate_node('localhost', :worker_1_port);

\c - - - :worker_2_port
SELECT nodeport, isactive FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;

\c - - - :master_port
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);