Merge pull request #5486 from citusdata/disable_node_async

Allow disabling node(s) when multiple failures happen
Önder Kalacı 2021-12-01 10:48:49 +01:00 committed by GitHub
commit ab365a335d
58 changed files with 1552 additions and 902 deletions


@@ -1093,7 +1093,7 @@ EnsureSequentialModeForFunctionDDL(void)
  * and triggers the metadata syncs if the node has not the metadata. Later,
  * maintenance daemon will sync the metadata to nodes.
  */
-static void
+void
 TriggerSyncMetadataToPrimaryNodes(void)
 {
 	List *workerList = ActivePrimaryNonCoordinatorNodeList(ShareLock);


@@ -428,22 +428,62 @@ citus_disable_node(PG_FUNCTION_ARGS)
 {
 	text *nodeNameText = PG_GETARG_TEXT_P(0);
 	int32 nodePort = PG_GETARG_INT32(1);
+	bool forceDisableNode = PG_GETARG_BOOL(2);
 	char *nodeName = text_to_cstring(nodeNameText);
 	WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort);
-	bool isActive = false;
-	bool onlyConsiderActivePlacements = false;
-	MemoryContext savedContext = CurrentMemoryContext;
-	PG_TRY();
+
+	/* there is no concept of invalid coordinator */
+	bool isActive = false;
+	ErrorIfCoordinatorMetadataSetFalse(workerNode, BoolGetDatum(isActive),
+									   "isactive");
+
+	WorkerNode *firstWorkerNode = GetFirstPrimaryWorkerNode();
+	if (!forceDisableNode && firstWorkerNode &&
+		firstWorkerNode->nodeId == workerNode->nodeId)
 	{
+		/*
+		 * We sync metadata asynchronously, and optionally in the background
+		 * worker, which means that some nodes might get the updates while
+		 * others do not. And, if the node metadata that is changing is the
+		 * first worker node, the problem gets nasty. We serialize modifications
+		 * to replicated tables by acquiring locks on the first worker node.
+		 *
+		 * If some nodes get the metadata changes and some do not, they'd be
+		 * acquiring the locks on different nodes. Hence, having the
+		 * possibility of diverged shard placements for the same shard.
+		 *
+		 * To prevent that, we currently do not allow disabling the first
+		 * worker node.
+		 */
+		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+						errmsg("disabling the first worker node in the "
+							   "metadata is not allowed"),
+						errhint("You can force disabling node, but this operation "
+								"might cause replicated shards to diverge: SELECT "
+								"citus_disable_node('%s', %d, force:=true);",
+								workerNode->workerName,
+								nodePort)));
+	}
+
+	/*
+	 * First, locally mark the node as inactive. We'll later trigger the
+	 * background worker to sync the metadata changes to the relevant nodes.
+	 */
+	workerNode =
+		SetWorkerColumnLocalOnly(workerNode,
+								 Anum_pg_dist_node_isactive,
+								 BoolGetDatum(isActive));
+
 	if (NodeIsPrimary(workerNode))
 	{
 		/*
-		 * Delete reference table placements so they are not taken into account
-		 * for the check if there are placements after this.
+		 * We do not allow disabling nodes if it contains any
+		 * primary placement that is the "only" active placement
+		 * for any given shard.
 		 */
-		DeleteAllReferenceTablePlacementsFromNodeGroup(workerNode->groupId);
+		ErrorIfNodeContainsNonRemovablePlacements(workerNode);
+
+		bool onlyConsiderActivePlacements = false;
 		if (NodeGroupHasShardPlacements(workerNode->groupId,
 										onlyConsiderActivePlacements))
 		{
@@ -456,36 +496,30 @@ citus_disable_node(PG_FUNCTION_ARGS)
 							workerNode->workerName,
 							nodePort)));
 		}
+
+		/*
+		 * Delete replicated table placements from the coordinator's metadata,
+		 * but not remotely. That is because one or more of the remote nodes
+		 * might be down. Instead, we let the background worker sync the
+		 * metadata when possible.
+		 */
+		bool forceRemoteDelete = false;
+		DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
+														forceRemoteDelete);
 	}
-	SetNodeState(nodeName, nodePort, isActive);
 	TransactionModifiedNodeMetadata = true;
-	}
-	PG_CATCH();
-	{
-		/* CopyErrorData() requires (CurrentMemoryContext != ErrorContext) */
-		MemoryContextSwitchTo(savedContext);
-		ErrorData *edata = CopyErrorData();
-	if (ClusterHasKnownMetadataWorkers())
+
+	/*
+	 * We have not propagated the metadata changes yet; make sure that all the
+	 * active nodes get the metadata updates. We defer this operation to the
+	 * background worker to make it possible to disable nodes when multiple
+	 * nodes are down.
+	 */
+	if (UnsetMetadataSyncedForAll())
 	{
-		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-						errmsg("Disabling %s:%d failed", workerNode->workerName,
-							   nodePort),
-						errdetail("%s", edata->message),
-						errhint(
-							"If you are using MX, try stop_metadata_sync_to_node(hostname, port) "
-							"for nodes that are down before disabling them.")));
+		TriggerMetadataSyncOnCommit();
 	}
-	else
-	{
-		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-						errmsg("Disabling %s:%d failed", workerNode->workerName,
-							   nodePort),
-						errdetail("%s", edata->message)));
-	}
-	}
-	PG_END_TRY();
+
 	PG_RETURN_VOID();
 }
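
To make the new flow concrete, here is a hedged usage sketch (host names and ports are placeholders, not taken from the diff): citus_disable_node now only updates the local metadata and defers propagation to the maintenance daemon, so callers that want the cluster-wide view to settle have to wait for the background sync. wait_until_metadata_sync() is the test-only helper this PR adds to the regression suites; production callers can instead poll pg_dist_node.metadatasynced.

-- Hedged sketch, placeholders only: disable a node, then wait until the
-- background worker has propagated the metadata change to the other nodes.
SELECT citus_disable_node('worker-1', 5432);
SELECT public.wait_until_metadata_sync();

-- Disabling the first worker node requires the new force flag and may let
-- replicated shards diverge, as the error hint in the code above warns.
SELECT citus_disable_node('worker-1', 5432, force := true);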
@@ -836,15 +870,43 @@ ActivateNode(char *nodeName, int nodePort)
 	/* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */
 	LockRelationOid(DistNodeRelationId(), ExclusiveLock);
 
-	WorkerNode *newWorkerNode = SetNodeState(nodeName, nodePort, isActive);
-	SetUpDistributedTableDependencies(newWorkerNode);
-	if (EnableMetadataSyncByDefault && NodeIsPrimary(newWorkerNode))
+	/*
+	 * First, locally mark the node as active; if everything goes well,
+	 * we are going to sync this information to all the metadata nodes.
+	 */
+	WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort);
+	if (workerNode == NULL)
+	{
+		ereport(ERROR, (errmsg("node at \"%s:%u\" does not exist", nodeName, nodePort)));
+	}
+
+	workerNode =
+		SetWorkerColumnLocalOnly(workerNode, Anum_pg_dist_node_isactive,
+								 BoolGetDatum(isActive));
+
+	bool syncMetadata =
+		EnableMetadataSyncByDefault && NodeIsPrimary(workerNode);
+
+	if (syncMetadata)
+	{
+		/*
+		 * We are going to sync the metadata anyway in this transaction, so do
+		 * not fail just because the current metadata is not synced.
+		 */
+		SetWorkerColumn(workerNode, Anum_pg_dist_node_metadatasynced,
+						BoolGetDatum(isActive));
+	}
+
+	SetUpDistributedTableDependencies(workerNode);
+
+	if (syncMetadata)
 	{
 		StartMetadataSyncToNode(nodeName, nodePort);
 	}
 
+	/* finally, let all other active metadata nodes learn about this change */
+	WorkerNode *newWorkerNode = SetNodeState(nodeName, nodePort, isActive);
+	Assert(newWorkerNode->nodeId == workerNode->nodeId);
+
 	return newWorkerNode->nodeId;
 }
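
A quick way to see the effect of the local-first activation is to re-activate a node and inspect the coordinator's catalog afterwards. The following is a hedged sketch: host and port are placeholders, and the column names are the standard pg_dist_node columns (isactive, metadatasynced) referenced elsewhere in this diff.

-- Hedged sketch, placeholders only: re-activate a previously disabled node
-- and check the coordinator's view of node state and metadata sync status.
SELECT citus_activate_node('worker-1', 5432);

SELECT nodename, nodeport, isactive, metadatasynced
FROM pg_dist_node
ORDER BY nodename, nodeport;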
@@ -1303,7 +1365,9 @@ RemoveNodeFromCluster(char *nodeName, int32 nodePort)
 	 * Delete reference table placements so they are not taken into account
 	 * for the check if there are placements after this.
 	 */
-	DeleteAllReferenceTablePlacementsFromNodeGroup(workerNode->groupId);
+	bool forceRemoteDelete = true;
+	DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
+													forceRemoteDelete);
 
 	/*
 	 * Secondary nodes are read-only, never 2PC is used.
@@ -1362,7 +1426,7 @@ ErrorIfNodeContainsNonRemovablePlacements(WorkerNode *workerNode)
 /*
  * PlacementHasActivePlacementOnAnotherGroup returns true if there is at least
- * one more healthy placement of the input sourcePlacement on another group.
+ * one more active placement of the input sourcePlacement on another group.
  */
 static bool
 PlacementHasActivePlacementOnAnotherGroup(GroupShardPlacement *sourcePlacement)
@@ -1370,18 +1434,18 @@ PlacementHasActivePlacementOnAnotherGroup(GroupShardPlacement *sourcePlacement)
 	uint64 shardId = sourcePlacement->shardId;
 	List *activePlacementList = ActiveShardPlacementList(shardId);
-	bool foundHealtyPlacementOnAnotherGroup = false;
+	bool foundActivePlacementOnAnotherGroup = false;
 
 	ShardPlacement *activePlacement = NULL;
 	foreach_ptr(activePlacement, activePlacementList)
 	{
 		if (activePlacement->groupId != sourcePlacement->groupId)
 		{
-			foundHealtyPlacementOnAnotherGroup = true;
+			foundActivePlacementOnAnotherGroup = true;
 			break;
 		}
 	}
 
-	return foundHealtyPlacementOnAnotherGroup;
+	return foundActivePlacementOnAnotherGroup;
 }


@@ -1,6 +1,7 @@
 -- citus--10.2-4--11.0-1
 -- bump version to 11.0-1
+#include "udfs/citus_disable_node/11.0-1.sql"
 
 DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text);
 DROP FUNCTION pg_catalog.master_get_table_metadata(text);


@@ -31,3 +31,11 @@ COMMENT ON FUNCTION master_append_table_to_shard(bigint, text, text, integer)
 GRANT ALL ON FUNCTION start_metadata_sync_to_node(text, integer) TO PUBLIC;
 GRANT ALL ON FUNCTION stop_metadata_sync_to_node(text, integer,bool) TO PUBLIC;
+
+DROP FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool);
+CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
+	RETURNS void
+	LANGUAGE C STRICT
+	AS 'MODULE_PATHNAME', $$citus_disable_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
+	IS 'removes node from the cluster temporarily';


@@ -0,0 +1,9 @@
+DROP FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer);
+CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool default false)
+	RETURNS void
+	LANGUAGE C STRICT
+	AS 'MODULE_PATHNAME', $$citus_disable_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool)
+	IS 'removes node from the cluster temporarily';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int, bool) FROM PUBLIC;


@@ -1,8 +1,9 @@
-CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
+DROP FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer);
+CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool default false)
 	RETURNS void
 	LANGUAGE C STRICT
 	AS 'MODULE_PATHNAME', $$citus_disable_node$$;
-COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
+COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool)
 	IS 'removes node from the cluster temporarily';
-REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int) FROM PUBLIC;
+REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int, bool) FROM PUBLIC;


@@ -48,6 +48,7 @@ static StringInfo CopyShardPlacementToWorkerNodeQuery(
 static void ReplicateShardToNode(ShardInterval *shardInterval, char *nodeName,
 								 int nodePort);
 static bool AnyRelationsModifiedInTransaction(List *relationIdList);
+static List * ReplicatedMetadataSyncedDistributedTableList(void);
 
 /* exports for SQL callable functions */
 PG_FUNCTION_INFO_V1(upgrade_to_reference_table);
@@ -426,46 +427,84 @@ CreateReferenceTableColocationId()
 /*
- * DeleteAllReferenceTablePlacementsFromNodeGroup function iterates over list of reference
- * tables and deletes all reference table placements from pg_dist_placement table
- * for given group.
+ * DeleteAllReplicatedTablePlacementsFromNodeGroup function iterates over
+ * list of reference and replicated hash distributed tables and deletes
+ * all placements from pg_dist_placement table for given group.
  */
 void
-DeleteAllReferenceTablePlacementsFromNodeGroup(int32 groupId)
+DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool forceRemoteDelete)
 {
 	List *referenceTableList = CitusTableTypeIdList(REFERENCE_TABLE);
+	List *replicatedMetadataSyncedDistributedTableList =
+		ReplicatedMetadataSyncedDistributedTableList();
+	List *replicatedTableList =
+		list_concat(referenceTableList, replicatedMetadataSyncedDistributedTableList);
 
 	/* if there are no reference tables, we do not need to do anything */
-	if (list_length(referenceTableList) == 0)
+	if (list_length(replicatedTableList) == 0)
 	{
 		return;
 	}
 
 	StringInfo deletePlacementCommand = makeStringInfo();
-	Oid referenceTableId = InvalidOid;
-	foreach_oid(referenceTableId, referenceTableList)
+	Oid replicatedTableId = InvalidOid;
+	foreach_oid(replicatedTableId, replicatedTableList)
 	{
-		List *placements = GroupShardPlacementsForTableOnGroup(referenceTableId,
-															   groupId);
+		List *placements =
+			GroupShardPlacementsForTableOnGroup(replicatedTableId, groupId);
 		if (list_length(placements) == 0)
 		{
-			/* this happens if the node was previously disabled */
+			/*
+			 * This happens either when the node was previously disabled or
+			 * when the table doesn't have a placement on this node.
+			 */
 			continue;
 		}
 
-		GroupShardPlacement *placement = (GroupShardPlacement *) linitial(placements);
+		GroupShardPlacement *placement = NULL;
+		foreach_ptr(placement, placements)
+		{
 			LockShardDistributionMetadata(placement->shardId, ExclusiveLock);
 			DeleteShardPlacementRow(placement->placementId);
 
+			if (forceRemoteDelete)
+			{
 				resetStringInfo(deletePlacementCommand);
 				appendStringInfo(deletePlacementCommand,
-								 "DELETE FROM pg_dist_placement WHERE placementid = "
-								 UINT64_FORMAT,
+								 "DELETE FROM pg_catalog.pg_dist_placement "
+								 "WHERE placementid = " UINT64_FORMAT,
								 placement->placementId);
 				SendCommandToWorkersWithMetadata(deletePlacementCommand->data);
 			}
+		}
+	}
+}
+
+
+/*
+ * ReplicatedMetadataSyncedDistributedTableList is a helper function which returns the
+ * list of replicated hash distributed tables.
+ */
+static List *
+ReplicatedMetadataSyncedDistributedTableList(void)
+{
+	List *distributedRelationList = CitusTableTypeIdList(DISTRIBUTED_TABLE);
+	List *replicatedHashDistributedTableList = NIL;
+
+	Oid relationId = InvalidOid;
+	foreach_oid(relationId, distributedRelationList)
+	{
+		if (ShouldSyncTableMetadata(relationId) && !SingleReplicatedTable(relationId))
+		{
+			replicatedHashDistributedTableList =
+				lappend_oid(replicatedHashDistributedTableList, relationId);
+		}
+	}
+
+	return replicatedHashDistributedTableList;
 }
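
For readers who want to see what the coordinator-side deletion operates on, here is a hedged inspection query. It only uses the standard Citus catalogs pg_dist_placement and pg_dist_shard, and the group id (14) is a placeholder; it lists roughly what GroupShardPlacementsForTableOnGroup walks per table before the rows are deleted locally (and, with forceRemoteDelete, on metadata workers as well).

-- Hedged sketch, inspection only: placements still recorded for node group 14.
SELECT s.logicalrelid::regclass AS table_name,
       p.shardid,
       p.placementid,
       p.shardstate
FROM pg_dist_placement p
JOIN pg_dist_shard s USING (shardid)
WHERE p.groupid = 14
ORDER BY table_name, p.shardid;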


@@ -470,20 +470,6 @@ SingleReplicatedTable(Oid relationId)
 		return false;
 	}
 
-	/* for hash distributed tables, it is sufficient to only check one shard */
-	if (IsCitusTableType(relationId, HASH_DISTRIBUTED))
-	{
-		/* checking only for the first shard id should suffice */
-		uint64 shardId = *(uint64 *) linitial(shardList);
-		shardPlacementList = ShardPlacementListWithoutOrphanedPlacements(shardId);
-		if (list_length(shardPlacementList) != 1)
-		{
-			return false;
-		}
-	}
-	else
-	{
 	List *shardIntervalList = LoadShardList(relationId);
 	uint64 *shardIdPointer = NULL;
 	foreach_ptr(shardIdPointer, shardIntervalList)
@@ -496,7 +482,6 @@ SingleReplicatedTable(Oid relationId)
 			return false;
 		}
 	}
-	}
 
 	return true;
 }


@@ -74,11 +74,10 @@ extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum);
 	"VALUES (" UINT64_FORMAT ", %d, " UINT64_FORMAT \
 	", %d, " UINT64_FORMAT \
 	") " \
-	"ON CONFLICT (placementid) DO UPDATE SET " \
-	"shardid = EXCLUDED.shardid, " \
+	"ON CONFLICT (shardid, groupid) DO UPDATE SET " \
 	"shardstate = EXCLUDED.shardstate, " \
 	"shardlength = EXCLUDED.shardlength, " \
-	"groupid = EXCLUDED.groupid"
+	"placementid = EXCLUDED.placementid"
 
 #define METADATA_SYNC_CHANNEL "metadata_sync"
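
For clarity, the statement below is a hedged illustration of the upsert this macro now produces. The INSERT prefix and column order are inferred from the format string above, the shard, group, and placement values are made up, and the new conflict target assumes a unique index on (shardid, groupid); none of these details are spelled out in the hunk itself.

-- Hedged illustration only; values and column list are assumptions.
INSERT INTO pg_dist_placement
       (shardid, shardstate, shardlength, groupid, placementid)
VALUES (102008, 1, 0, 14, 100)
ON CONFLICT (shardid, groupid) DO UPDATE SET
       shardstate  = EXCLUDED.shardstate,
       shardlength = EXCLUDED.shardlength,
       placementid = EXCLUDED.placementid;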


@@ -21,7 +21,8 @@
 extern void EnsureReferenceTablesExistOnAllNodes(void);
 extern void EnsureReferenceTablesExistOnAllNodesExtended(char transferMode);
 extern uint32 CreateReferenceTableColocationId(void);
-extern void DeleteAllReferenceTablePlacementsFromNodeGroup(int32 groupId);
+extern void DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool
+															forceRemoteDelete);
 extern int CompareOids(const void *leftElement, const void *rightElement);
 extern int ReferenceTableReplicationFactor(void);
 extern void ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort);


@@ -1,5 +1,5 @@
+test: multi_cluster_management
 test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers
-test: multi_cluster_management
 test: multi_test_catalog_views
 test: columnar_create


@@ -1,7 +1,7 @@
 --
 -- failure_add_disable_node tests master_add_node, master_remove_node
 -- master_activate_node for failures.
--- master_disable_node and master_add_inactive_node can not be
+-- citus_disable_node_and_wait and master_add_inactive_node can not be
 -- tested as they don't create network activity
 --
 SELECT citus.mitmproxy('conn.allow()');
@@ -53,9 +53,15 @@ ORDER BY placementid;
  200000 | 1
 (2 rows)
-SELECT master_disable_node('localhost', :worker_2_proxy_port);
+SELECT citus_disable_node('localhost', :worker_2_proxy_port, true);
 NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 9060) to activate this node back.
- master_disable_node
+ citus_disable_node
+---------------------------------------------------------------------
+(1 row)
+SELECT public.wait_until_metadata_sync();
+ wait_until_metadata_sync
 ---------------------------------------------------------------------
 (1 row)


@@ -10,6 +10,10 @@ SELECT pg_reload_conf();
  t
 (1 row)
+CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
+  RETURNS void
+  LANGUAGE C STRICT
+  AS 'citus';
 -- Add some helper functions for sending commands to mitmproxy
 CREATE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$
 DECLARE


@@ -66,6 +66,8 @@ NOTICE: cleaned up 2 orphaned shards
 SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_rebalance.%' AND refd_relid LIKE 'fkey_to_reference_shard_rebalance.%' ORDER BY 1,2,3;
  name | relid | refd_relid
 ---------------------------------------------------------------------
+ referencing_table2_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referencing_table
+ referencing_table2_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referencing_table
 referencing_table2_id_fkey_15000009 | fkey_to_reference_shard_rebalance.referencing_table2_15000009 | fkey_to_reference_shard_rebalance.referencing_table_15000001
 referencing_table2_id_fkey_15000010 | fkey_to_reference_shard_rebalance.referencing_table2_15000010 | fkey_to_reference_shard_rebalance.referencing_table_15000002
 referencing_table2_id_fkey_15000011 | fkey_to_reference_shard_rebalance.referencing_table2_15000011 | fkey_to_reference_shard_rebalance.referencing_table_15000003
@@ -74,6 +76,8 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
 referencing_table2_id_fkey_15000014 | fkey_to_reference_shard_rebalance.referencing_table2_15000014 | fkey_to_reference_shard_rebalance.referencing_table_15000006
 referencing_table2_id_fkey_15000015 | fkey_to_reference_shard_rebalance.referencing_table2_15000015 | fkey_to_reference_shard_rebalance.referencing_table_15000007
 referencing_table2_id_fkey_15000016 | fkey_to_reference_shard_rebalance.referencing_table2_15000016 | fkey_to_reference_shard_rebalance.referencing_table_15000008
+ referencing_table2_ref_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referenced_table
+ referencing_table2_ref_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referenced_table
 referencing_table2_ref_id_fkey_15000009 | fkey_to_reference_shard_rebalance.referencing_table2_15000009 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table2_ref_id_fkey_15000010 | fkey_to_reference_shard_rebalance.referencing_table2_15000010 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table2_ref_id_fkey_15000011 | fkey_to_reference_shard_rebalance.referencing_table2_15000011 | fkey_to_reference_shard_rebalance.referenced_table_15000000
@@ -82,6 +86,8 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
 referencing_table2_ref_id_fkey_15000014 | fkey_to_reference_shard_rebalance.referencing_table2_15000014 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table2_ref_id_fkey_15000015 | fkey_to_reference_shard_rebalance.referencing_table2_15000015 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table2_ref_id_fkey_15000016 | fkey_to_reference_shard_rebalance.referencing_table2_15000016 | fkey_to_reference_shard_rebalance.referenced_table_15000000
+ referencing_table_id_fkey | fkey_to_reference_shard_rebalance.referencing_table | fkey_to_reference_shard_rebalance.referenced_table
+ referencing_table_id_fkey | fkey_to_reference_shard_rebalance.referencing_table | fkey_to_reference_shard_rebalance.referenced_table
 referencing_table_id_fkey_15000001 | fkey_to_reference_shard_rebalance.referencing_table_15000001 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table_id_fkey_15000002 | fkey_to_reference_shard_rebalance.referencing_table_15000002 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table_id_fkey_15000003 | fkey_to_reference_shard_rebalance.referencing_table_15000003 | fkey_to_reference_shard_rebalance.referenced_table_15000000
@@ -90,7 +96,7 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
 referencing_table_id_fkey_15000006 | fkey_to_reference_shard_rebalance.referencing_table_15000006 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table_id_fkey_15000007 | fkey_to_reference_shard_rebalance.referencing_table_15000007 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table_id_fkey_15000008 | fkey_to_reference_shard_rebalance.referencing_table_15000008 | fkey_to_reference_shard_rebalance.referenced_table_15000000
-(24 rows)
+(30 rows)
 SELECT master_move_shard_placement(15000009, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes');
  master_move_shard_placement
@@ -109,6 +115,8 @@ NOTICE: cleaned up 2 orphaned shards
 SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_rebalance.%' AND refd_relid LIKE 'fkey_to_reference_shard_rebalance.%' ORDER BY 1,2,3;
  name | relid | refd_relid
 ---------------------------------------------------------------------
+ referencing_table2_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referencing_table
+ referencing_table2_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referencing_table
 referencing_table2_id_fkey_15000009 | fkey_to_reference_shard_rebalance.referencing_table2_15000009 | fkey_to_reference_shard_rebalance.referencing_table_15000001
 referencing_table2_id_fkey_15000010 | fkey_to_reference_shard_rebalance.referencing_table2_15000010 | fkey_to_reference_shard_rebalance.referencing_table_15000002
 referencing_table2_id_fkey_15000011 | fkey_to_reference_shard_rebalance.referencing_table2_15000011 | fkey_to_reference_shard_rebalance.referencing_table_15000003
@@ -117,6 +125,8 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
 referencing_table2_id_fkey_15000014 | fkey_to_reference_shard_rebalance.referencing_table2_15000014 | fkey_to_reference_shard_rebalance.referencing_table_15000006
 referencing_table2_id_fkey_15000015 | fkey_to_reference_shard_rebalance.referencing_table2_15000015 | fkey_to_reference_shard_rebalance.referencing_table_15000007
 referencing_table2_id_fkey_15000016 | fkey_to_reference_shard_rebalance.referencing_table2_15000016 | fkey_to_reference_shard_rebalance.referencing_table_15000008
+ referencing_table2_ref_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referenced_table
+ referencing_table2_ref_id_fkey | fkey_to_reference_shard_rebalance.referencing_table2 | fkey_to_reference_shard_rebalance.referenced_table
 referencing_table2_ref_id_fkey_15000009 | fkey_to_reference_shard_rebalance.referencing_table2_15000009 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table2_ref_id_fkey_15000010 | fkey_to_reference_shard_rebalance.referencing_table2_15000010 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table2_ref_id_fkey_15000011 | fkey_to_reference_shard_rebalance.referencing_table2_15000011 | fkey_to_reference_shard_rebalance.referenced_table_15000000
@@ -125,6 +135,8 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
 referencing_table2_ref_id_fkey_15000014 | fkey_to_reference_shard_rebalance.referencing_table2_15000014 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table2_ref_id_fkey_15000015 | fkey_to_reference_shard_rebalance.referencing_table2_15000015 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table2_ref_id_fkey_15000016 | fkey_to_reference_shard_rebalance.referencing_table2_15000016 | fkey_to_reference_shard_rebalance.referenced_table_15000000
+ referencing_table_id_fkey | fkey_to_reference_shard_rebalance.referencing_table | fkey_to_reference_shard_rebalance.referenced_table
+ referencing_table_id_fkey | fkey_to_reference_shard_rebalance.referencing_table | fkey_to_reference_shard_rebalance.referenced_table
 referencing_table_id_fkey_15000001 | fkey_to_reference_shard_rebalance.referencing_table_15000001 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table_id_fkey_15000002 | fkey_to_reference_shard_rebalance.referencing_table_15000002 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table_id_fkey_15000003 | fkey_to_reference_shard_rebalance.referencing_table_15000003 | fkey_to_reference_shard_rebalance.referenced_table_15000000
@@ -133,7 +145,7 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_r
 referencing_table_id_fkey_15000006 | fkey_to_reference_shard_rebalance.referencing_table_15000006 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table_id_fkey_15000007 | fkey_to_reference_shard_rebalance.referencing_table_15000007 | fkey_to_reference_shard_rebalance.referenced_table_15000000
 referencing_table_id_fkey_15000008 | fkey_to_reference_shard_rebalance.referencing_table_15000008 | fkey_to_reference_shard_rebalance.referenced_table_15000000
-(24 rows)
+(30 rows)
 -- create a function to show the
 CREATE FUNCTION get_foreign_key_to_reference_table_commands(Oid)


@@ -129,6 +129,7 @@ SELECT * FROM create_distributed_table('rep1', 'id');
 -- Add the coordinator, so we can have a replicated shard
 SELECT 1 FROM citus_add_node('localhost', :master_port, 0);
 NOTICE: Replicating reference table "ref" to the node localhost:xxxxx
+NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
  ?column?
 ---------------------------------------------------------------------
         1
@@ -249,12 +250,18 @@ SET LOCAL citus.log_local_commands TO ON;
 INSERT INTO rep1 VALUES (1);
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT lock_shard_resources(3, ARRAY[92448300])
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: executing the command locally: INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
 ROLLBACK;
 NOTICE: issuing ROLLBACK
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ROLLBACK
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 -- Cause the orphaned shard to be local
 SELECT 1 FROM citus_drain_node('localhost', :master_port);
 NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
@@ -278,12 +285,14 @@ SET LOCAL citus.log_local_commands TO ON;
 INSERT INTO rep1 VALUES (1);
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
+NOTICE: issuing SELECT lock_shard_resources(3, ARRAY[92448300])
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing INSERT INTO ignoring_orphaned_shards.rep1_92448300 (id) VALUES (1)
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 ROLLBACK;
 NOTICE: issuing ROLLBACK
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx


@@ -1,11 +1,6 @@
 Parsed test spec with 2 sessions
 starting permutation: s1-begin s1-add-node-1 s2-remove-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-begin:
     BEGIN;
@@ -42,11 +37,6 @@ master_remove_node
 starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-begin:
     BEGIN;
@@ -87,11 +77,6 @@ master_remove_node
 starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-begin:
     BEGIN;
@@ -130,11 +115,6 @@ master_remove_node
 starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-abort s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-begin:
     BEGIN;
@@ -173,11 +153,6 @@ master_remove_node
 starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-abort s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-begin:
     BEGIN;
@@ -216,11 +191,6 @@ master_remove_node
 starting permutation: s1-add-node-1 s1-add-node-2 s1-begin s1-remove-node-1 s2-remove-node-2 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-node-1:
     SELECT 1 FROM master_add_node('localhost', 57637);
@@ -273,11 +243,6 @@ master_remove_node
 starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-remove-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-node-1:
     SELECT 1 FROM master_add_node('localhost', 57637);
@@ -318,11 +283,6 @@ master_remove_node
 starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-node-1:
     SELECT 1 FROM master_add_node('localhost', 57637);
@@ -369,11 +329,6 @@ master_remove_node
 starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-node-1:
     SELECT 1 FROM master_add_node('localhost', 57637);
@@ -387,14 +342,21 @@ step s1-begin:
 step s1-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
 ?column?
 ---------------------------------------------------------------------
         1
 (1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s2-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
  <waiting ...>
 step s1-commit:
     COMMIT;
@@ -405,6 +367,11 @@ step s2-disable-node-1: <... completed>
         1
 (1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s1-show-nodes:
     SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
@@ -420,11 +387,6 @@ master_remove_node
 starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-inactive-1:
     SELECT 1 FROM master_add_inactive_node('localhost', 57637);
@@ -471,11 +433,6 @@ master_remove_node
 starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-inactive-1:
     SELECT 1 FROM master_add_inactive_node('localhost', 57637);
@@ -489,14 +446,21 @@ step s1-begin:
 step s1-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
 ?column?
 ---------------------------------------------------------------------
         1
 (1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s2-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
  <waiting ...>
 step s1-commit:
     COMMIT;
@@ -507,6 +471,11 @@ step s2-disable-node-1: <... completed>
         1
 (1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s1-show-nodes:
     SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
@@ -522,11 +491,6 @@ master_remove_node
 starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-node-1:
     SELECT 1 FROM master_add_node('localhost', 57637);
@@ -540,12 +504,18 @@ step s1-begin:
 step s1-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
 ?column?
 ---------------------------------------------------------------------
         1
 (1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s2-activate-node-1:
     SELECT 1 FROM master_activate_node('localhost', 57637);
  <waiting ...>
@@ -573,11 +543,6 @@ master_remove_node
 starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-node-1:
     SELECT 1 FROM master_add_node('localhost', 57637);
@@ -599,6 +564,7 @@ step s1-activate-node-1:
 step s2-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
  <waiting ...>
 step s1-commit:
     COMMIT;
@@ -609,6 +575,11 @@ step s2-disable-node-1: <... completed>
         1
 (1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s1-show-nodes:
     SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
@@ -624,11 +595,6 @@ master_remove_node
 starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-inactive-1:
     SELECT 1 FROM master_add_inactive_node('localhost', 57637);
@@ -642,12 +608,18 @@ step s1-begin:
 step s1-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
 ?column?
 ---------------------------------------------------------------------
         1
 (1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s2-activate-node-1:
     SELECT 1 FROM master_activate_node('localhost', 57637);
  <waiting ...>
@@ -675,11 +647,6 @@ master_remove_node
 starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-inactive-1:
     SELECT 1 FROM master_add_inactive_node('localhost', 57637);
@@ -701,6 +668,7 @@ step s1-activate-node-1:
 step s2-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
  <waiting ...>
 step s1-commit:
     COMMIT;
@@ -711,57 +679,11 @@ step s2-disable-node-1: <... completed>
         1
 (1 row)
-step s1-show-nodes:
-    SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
-nodename |nodeport|isactive
----------------------------------------------------------------------
-localhost| 57637|f
-(1 row)
-master_remove_node
----------------------------------------------------------------------
-(1 row)
-starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-abort s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
-step s1-add-inactive-1:
-    SELECT 1 FROM master_add_inactive_node('localhost', 57637);
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
-step s1-begin:
-    BEGIN;
-step s1-activate-node-1:
-    SELECT 1 FROM master_activate_node('localhost', 57637);
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
-step s2-disable-node-1:
-    SELECT 1 FROM master_disable_node('localhost', 57637);
- <waiting ...>
-step s1-abort:
-    ABORT;
-step s2-disable-node-1: <... completed>
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s1-show-nodes:
     SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
@@ -777,11 +699,6 @@ master_remove_node
 starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-abort s1-show-nodes
-?column?
----------------------------------------------------------------------
-        1
-(1 row)
 step s1-add-node-1:
     SELECT 1 FROM master_add_node('localhost', 57637);
@@ -795,14 +712,21 @@ step s1-begin:
 step s1-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
 ?column?
 ---------------------------------------------------------------------
         1
 (1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s2-disable-node-1:
     SELECT 1 FROM master_disable_node('localhost', 57637);
+    SELECT public.wait_until_metadata_sync();
  <waiting ...>
 step s1-abort:
     ABORT;
@@ -813,6 +737,11 @@ step s2-disable-node-1: <... completed>
         1
 (1 row)
+wait_until_metadata_sync
+---------------------------------------------------------------------
+(1 row)
 step s1-show-nodes:
     SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;


@ -3,9 +3,21 @@ Parsed test spec with 4 sessions
starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort
step s1-begin: step s1-begin:
BEGIN; BEGIN;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
assign_distributed_transaction_id
---------------------------------------------------------------------
(1 row)
step s2-begin: step s2-begin:
BEGIN; BEGIN;
SELECT assign_distributed_transaction_id(0, 9, '2021-07-09 15:41:55.542377+02');
assign_distributed_transaction_id
---------------------------------------------------------------------
(1 row)
step s1-update: step s1-update:
UPDATE distributed_table SET y = 1 WHERE x = 1; UPDATE distributed_table SET y = 1 WHERE x = 1;
@ -28,13 +40,13 @@ step detector-dump-wait-edges:
waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
-406| 405|f
+9| 8|f
(1 row)
transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
-405|
-406| 405
+8|
+9| 8
(2 rows)
step s1-abort: step s1-abort:
@ -53,12 +65,30 @@ restore_isolation_tester_func
starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort
step s1-begin: step s1-begin:
BEGIN; BEGIN;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
assign_distributed_transaction_id
---------------------------------------------------------------------
(1 row)
step s2-begin: step s2-begin:
BEGIN; BEGIN;
SELECT assign_distributed_transaction_id(0, 9, '2021-07-09 15:41:55.542377+02');
assign_distributed_transaction_id
---------------------------------------------------------------------
(1 row)
step s3-begin: step s3-begin:
BEGIN; BEGIN;
SELECT assign_distributed_transaction_id(0, 10, '2021-07-09 15:41:55.542377+02');
assign_distributed_transaction_id
---------------------------------------------------------------------
(1 row)
step s1-update: step s1-update:
UPDATE distributed_table SET y = 1 WHERE x = 1; UPDATE distributed_table SET y = 1 WHERE x = 1;
@ -84,16 +114,16 @@ step detector-dump-wait-edges:
waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
-410| 409|f
-411| 409|f
-411| 410|t
+9| 8|f
+10| 8|f
+10| 9|t
(3 rows)
transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
-409|
-410|409
-411|409,410
+8|
+9|8
+10|8,9
(3 rows)
step s1-abort: step s1-abort:
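Note: the wait-edge permutations above now pin the distributed transaction ids right after BEGIN, which is why the expected output switches from run-dependent numbers (405, 406, ...) to the fixed values 8, 9 and 10. The pattern, taken from the updated steps (distributed_table and the literal id/timestamp come from the test spec itself):
    BEGIN;
    -- pin this backend's distributed transaction id so dump_global_wait_edges()
    -- reports a deterministic number instead of a freshly assigned one
    SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
    UPDATE distributed_table SET y = 1 WHERE x = 1;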
@ -1,104 +0,0 @@
Parsed test spec with 4 sessions
starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-update:
UPDATE distributed_table SET y = 1 WHERE x = 1;
step s2-update:
UPDATE distributed_table SET y = 2 WHERE x = 1;
<waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting
FROM
dump_global_wait_edges()
ORDER BY
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting;
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;
waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
406| 405|f
(1 row)
transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
405|
406| 405
(2 rows)
step s1-abort:
ABORT;
step s2-update: <... completed>
step s2-abort:
ABORT;
starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s3-begin:
BEGIN;
step s1-update:
UPDATE distributed_table SET y = 1 WHERE x = 1;
step s2-update:
UPDATE distributed_table SET y = 2 WHERE x = 1;
<waiting ...>
step s3-update:
UPDATE distributed_table SET y = 3 WHERE x = 1;
<waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting
FROM
dump_global_wait_edges()
ORDER BY
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting;
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;
waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
410| 409|f
411| 409|f
411| 410|t
(3 rows)
transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
409|
410|409
411|409,410
(3 rows)
step s1-abort:
ABORT;
step s2-update: <... completed>
step s2-abort:
ABORT;
step s3-update: <... completed>
step s3-abort:
ABORT;
@ -1,6 +1,6 @@
Parsed test spec with 3 sessions Parsed test spec with 3 sessions
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -153,6 +153,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -160,7 +186,7 @@ master_remove_node
(2 rows) (2 rows)
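Note: each permutation in this spec now ends with the two cleanup steps whose output appears above: the schemas are dropped on the coordinator directly and on the workers via run_command_on_workers(), rather than in a single teardown transaction, because, per the step's own comment, that would self-deadlock. Roughly:
    -- coordinator side
    DROP SCHEMA IF EXISTS myschema CASCADE;
    DROP SCHEMA IF EXISTS myschema2 CASCADE;
    -- worker side
    SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
    SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);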
starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -319,6 +345,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -326,7 +378,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -485,6 +537,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -492,7 +570,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -647,6 +725,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -654,7 +758,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -815,6 +919,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -822,7 +952,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -983,6 +1113,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -990,7 +1146,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s3-begin s1-add-worker s2-create-table s3-use-schema s3-create-table s1-commit s2-commit s3-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s1-add-worker s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -1071,9 +1227,6 @@ step s1-begin:
step s2-begin: step s2-begin:
BEGIN; BEGIN;
step s3-begin:
BEGIN;
step s1-add-worker: step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638); SELECT 1 FROM master_add_node('localhost', 57638);
@ -1088,15 +1241,6 @@ step s2-create-table:
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a'); SELECT create_distributed_table('t1', 'a');
<waiting ...> <waiting ...>
step s3-use-schema:
SET search_path TO myschema;
step s3-create-table:
CREATE TABLE t2 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t2', 'a');
<waiting ...>
step s1-commit: step s1-commit:
COMMIT; COMMIT;
@ -1109,15 +1253,6 @@ create_distributed_table
step s2-commit: step s2-commit:
COMMIT; COMMIT;
step s3-create-table: <... completed>
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s3-commit:
COMMIT;
step s2-print-distributed-objects: step s2-print-distributed-objects:
-- print an overview of all distributed objects -- print an overview of all distributed objects
SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@ -1172,6 +1307,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1179,7 +1340,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -1354,6 +1515,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1361,7 +1548,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s3-commit s2-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s3-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -1545,6 +1732,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1552,7 +1765,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -1698,6 +1911,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1705,7 +1944,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -1850,6 +2089,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1857,7 +2122,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -2022,6 +2287,32 @@ run_command_on_workers
(localhost,57638,t,0) (localhost,57638,t,0)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -2029,7 +2320,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -2195,6 +2486,32 @@ run_command_on_workers
(localhost,57638,t,1) (localhost,57638,t,1)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -2202,7 +2519,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -2375,6 +2692,32 @@ run_command_on_workers
(localhost,57638,t,1) (localhost,57638,t,1)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -2382,7 +2725,7 @@ master_remove_node
(2 rows) (2 rows)
starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -2557,6 +2900,32 @@ run_command_on_workers
(localhost,57638,t,1) (localhost,57638,t,1)
(2 rows) (2 rows)
step s3-drop-coordinator-schemas:
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -3,8 +3,8 @@ Parsed test spec with 2 sessions
starting permutation: s1-begin s1-update-node-1 s2-update-node-2 s1-commit s1-show-nodes starting permutation: s1-begin s1-update-node-1 s2-update-node-2 s1-commit s1-show-nodes
nodeid|nodename |nodeport
---------------------------------------------------------------------
-22|localhost| 57637
-23|localhost| 57638
+22|localhost| 57638
+21|localhost| 57637
(2 rows)
step s1-begin: step s1-begin:
@ -43,8 +43,8 @@ step s1-show-nodes:
nodeid|nodename |nodeport|isactive nodeid|nodename |nodeport|isactive
--------------------------------------------------------------------- ---------------------------------------------------------------------
22|localhost| 58637|t 21|localhost| 58637|t
23|localhost| 58638|t 22|localhost| 58638|t
(2 rows) (2 rows)
nodeid|nodename|nodeport nodeid|nodename|nodeport
@ -55,8 +55,8 @@ nodeid|nodename|nodeport
starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes
nodeid|nodename |nodeport nodeid|nodename |nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
24|localhost| 57637 24|localhost| 57638
25|localhost| 57638 23|localhost| 57637
(2 rows) (2 rows)
step s1-begin: step s1-begin:
@ -101,8 +101,8 @@ step s1-show-nodes:
nodeid|nodename |nodeport|isactive nodeid|nodename |nodeport|isactive
--------------------------------------------------------------------- ---------------------------------------------------------------------
25|localhost| 57638|t 24|localhost| 57638|t
24|localhost| 58637|t 23|localhost| 58637|t
(2 rows) (2 rows)
nodeid|nodename|nodeport nodeid|nodename|nodeport
@ -113,8 +113,8 @@ nodeid|nodename|nodeport
starting permutation: s1-begin s1-update-node-1 s2-start-metadata-sync-node-2 s1-commit s2-verify-metadata starting permutation: s1-begin s1-update-node-1 s2-start-metadata-sync-node-2 s1-commit s2-verify-metadata
nodeid|nodename |nodeport nodeid|nodename |nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
26|localhost| 57637 26|localhost| 57638
27|localhost| 57638 25|localhost| 57637
(2 rows) (2 rows)
step s1-begin: step s1-begin:
@ -152,13 +152,13 @@ step s2-verify-metadata:
nodeid|groupid|nodename |nodeport nodeid|groupid|nodename |nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
26| 26|localhost| 58637 25| 25|localhost| 58637
27| 27|localhost| 57638 26| 26|localhost| 57638
(2 rows) (2 rows)
master_run_on_worker master_run_on_worker
--------------------------------------------------------------------- ---------------------------------------------------------------------
(localhost,57638,t,"[{""f1"": 26, ""f2"": 26, ""f3"": ""localhost"", ""f4"": 58637}, {""f1"": 27, ""f2"": 27, ""f3"": ""localhost"", ""f4"": 57638}]") (localhost,57638,t,"[{""f1"": 25, ""f2"": 25, ""f3"": ""localhost"", ""f4"": 58637}, {""f1"": 26, ""f2"": 26, ""f3"": ""localhost"", ""f4"": 57638}]")
(1 row) (1 row)
nodeid|nodename|nodeport nodeid|nodename|nodeport
@ -169,8 +169,8 @@ nodeid|nodename|nodeport
starting permutation: s2-create-table s1-begin s1-update-node-nonexistent s1-prepare-transaction s2-cache-prepared-statement s1-commit-prepared s2-execute-prepared s1-update-node-existent s2-drop-table starting permutation: s2-create-table s1-begin s1-update-node-nonexistent s1-prepare-transaction s2-cache-prepared-statement s1-commit-prepared s2-execute-prepared s1-update-node-existent s2-drop-table
nodeid|nodename |nodeport nodeid|nodename |nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
28|localhost| 57637 28|localhost| 57638
29|localhost| 57638 27|localhost| 57637
(2 rows) (2 rows)
step s2-create-table: step s2-create-table:
@ -57,7 +57,7 @@ SELECT master_get_active_worker_nodes();
(localhost,57637) (localhost,57637)
(1 row) (1 row)
-- try to disable a node with no placements see that node is removed -- try to disable a node with no placements see that node is s=removed
SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
@ -66,8 +66,14 @@ DETAIL: distributed objects are only kept in sync when citus.enable_object_prop
1 1
(1 row) (1 row)
-SELECT master_disable_node('localhost', :worker_2_port);
-master_disable_node
+SELECT citus_disable_node('localhost', :worker_2_port);
+citus_disable_node
+---------------------------------------------------------------------
+(1 row)
+SELECT public.wait_until_metadata_sync();
+wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
@ -144,7 +150,7 @@ SELECT master_get_active_worker_nodes();
(localhost,57637) (localhost,57637)
(2 rows) (2 rows)
-- insert a row so that master_disable_node() exercises closing connections -- insert a row so that citus_disable_node() exercises closing connections
CREATE TABLE test_reference_table (y int primary key, name text); CREATE TABLE test_reference_table (y int primary key, name text);
SELECT create_reference_table('test_reference_table'); SELECT create_reference_table('test_reference_table');
create_reference_table create_reference_table
@ -158,29 +164,27 @@ SELECT citus_remove_node('localhost', :worker_2_port);
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
--- try to disable a node with active placements see that node is removed
--- observe that a notification is displayed
-SELECT master_disable_node('localhost', :worker_2_port);
-NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
-master_disable_node
----------------------------------------------------------------------
-(1 row)
+-- try to disable a node with active placements
+-- which should fail because there are some placements
+-- which are the only placements for a given shard
+SELECT citus_disable_node('localhost', :worker_2_port);
+ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
+DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
+HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
SELECT master_get_active_worker_nodes();
master_get_active_worker_nodes
---------------------------------------------------------------------
+(localhost,57638)
(localhost,57637)
-(1 row)
+(2 rows)
-- try to disable a node which does not exist and see that an error is thrown -- try to disable a node which does not exist and see that an error is thrown
SELECT master_disable_node('localhost.noexist', 2345); SELECT citus_disable_node('localhost.noexist', 2345);
ERROR: node at "localhost.noexist:2345" does not exist ERROR: node at "localhost.noexist:2345" does not exist
-- drop the table without leaving a shard placement behind (messes up other tests) -- drop the table without leaving a shard placement behind (messes up other tests)
SELECT master_activate_node('localhost', :worker_2_port); SELECT master_activate_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
NOTICE: Replicating reference table "test_reference_table" to the node localhost:xxxxx
master_activate_node master_activate_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
3 3
@ -205,7 +209,8 @@ GRANT EXECUTE ON FUNCTION master_activate_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_secondary_node(text,int,text,int,name) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_secondary_node(text,int,text,int,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_disable_node(text,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION citus_disable_node(text,int,bool) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION citus_disable_node_and_wait(text,int,bool) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_remove_node(text,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_remove_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_metadata_user;
-- user needs permission for the pg_dist_node and pg_dist_local_group for metadata syncing -- user needs permission for the pg_dist_node and pg_dist_local_group for metadata syncing
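Note: the GRANT changes above track the new signature: the grant on master_disable_node(text,int) is replaced by grants on citus_disable_node(text,int,bool) and on the citus_disable_node_and_wait helper, the boolean being the new force flag. The tests elsewhere call the function with both two and three arguments, so the flag evidently has a default. A hedged sketch of how a restricted user would exercise it (node_metadata_user and the port come from the test; the exact default value is assumed to be false):
    GRANT EXECUTE ON FUNCTION citus_disable_node(text,int,bool) TO node_metadata_user;
    -- as node_metadata_user:
    SELECT citus_disable_node('localhost', 57638);                 -- default behavior
    SELECT citus_disable_node('localhost', 57638, force := true);  -- explicit override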
@ -237,8 +242,8 @@ SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_add_inactive_node ERROR: permission denied for function master_add_inactive_node
SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1); SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_activate_node ERROR: permission denied for function master_activate_node
SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1); SELECT 1 FROM citus_disable_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_disable_node ERROR: permission denied for function citus_disable_node
SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1); SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_remove_node ERROR: permission denied for function master_remove_node
SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1); SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
@ -265,12 +270,6 @@ DETAIL: distributed objects are only kept in sync when citus.enable_object_prop
1 1
(1 row) (1 row)
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT 1 FROM master_remove_node('localhost', :worker_2_port); SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -409,6 +408,22 @@ WARNING: could not find any shard placements for shardId 1220025
WARNING: could not find any shard placements for shardId 1220027 WARNING: could not find any shard placements for shardId 1220027
WARNING: could not find any shard placements for shardId 1220029 WARNING: could not find any shard placements for shardId 1220029
WARNING: could not find any shard placements for shardId 1220031 WARNING: could not find any shard placements for shardId 1220031
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -833,8 +848,14 @@ SELECT master_activate_node('localhost', 9999);
22 22
(1 row) (1 row)
-SELECT master_disable_node('localhost', 9999);
-master_disable_node
+SELECT citus_disable_node('localhost', 9999);
+citus_disable_node
+---------------------------------------------------------------------
+(1 row)
+SELECT public.wait_until_metadata_sync();
+wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
@ -968,10 +968,12 @@ ALTER EXTENSION citus UPDATE TO '11.0-1';
SELECT * FROM multi_extension.print_extension_changes(); SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object previous_object | current_object
--------------------------------------------------------------------- ---------------------------------------------------------------------
function citus_disable_node(text,integer) void |
function master_append_table_to_shard(bigint,text,text,integer) real | function master_append_table_to_shard(bigint,text,text,integer) real |
function master_apply_delete_command(text) integer | function master_apply_delete_command(text) integer |
function master_get_table_metadata(text) record | function master_get_table_metadata(text) record |
(3 rows) | function citus_disable_node(text,integer,boolean) void
(5 rows)
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version -- show running version
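Note: the catalog diff above records the signature change for citus_disable_node, from (text,integer) to (text,integer,boolean). A generic catalog query (not part of the test) can confirm which variant an installation carries:
    SELECT proname, pg_get_function_arguments(oid) AS args
    FROM pg_proc
    WHERE proname = 'citus_disable_node';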
@ -1418,6 +1418,9 @@ CREATE TABLE tmp_placement AS
DELETE FROM pg_dist_placement DELETE FROM pg_dist_placement
WHERE groupid = :old_worker_2_group; WHERE groupid = :old_worker_2_group;
SELECT master_remove_node('localhost', :worker_2_port); SELECT master_remove_node('localhost', :worker_2_port);
WARNING: could not find any shard placements for shardId 1310001
WARNING: could not find any shard placements for shardId 1310021
WARNING: could not find any shard placements for shardId 1310026
master_remove_node master_remove_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1585,14 +1588,17 @@ HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT master_add_node('localhost', :master_port, groupid => 0); SELECT master_add_node('localhost', :master_port, groupid => 0);
ERROR: localhost:xxxxx is a metadata node, but is out of sync ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again. HINT: If the node is up, wait until metadata gets synced to it and try again.
-SELECT master_disable_node('localhost', :worker_1_port);
-ERROR: Disabling localhost:xxxxx failed
-DETAIL: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them.
-SELECT master_disable_node('localhost', :worker_2_port);
-ERROR: Disabling localhost:xxxxx failed
-DETAIL: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them.
+SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
+ERROR: disabling the first worker node in the metadata is not allowed
+HINT: You can force disabling node, but this operation might cause replicated shards to diverge: SELECT citus_disable_node('localhost', 57637, force:=true);
+CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)"
+PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM
+SELECT citus_disable_node_and_wait('localhost', :worker_2_port);
+ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
+DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table
+HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
+CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)"
+PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM
SELECT master_remove_node('localhost', :worker_1_port); SELECT master_remove_node('localhost', :worker_1_port);
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table
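Note: the new error and hint above exercise the guard this PR adds: disabling the first worker node in the metadata is refused by default and, per the hint, can only be forced while accepting that replicated shards may diverge. A sketch of both paths, with host and port as placeholders:
    -- refused when localhost:57637 is the first worker node in pg_dist_node
    SELECT citus_disable_node('localhost', 57637);
    -- opt-in override, accepting the risk spelled out in the hint
    SELECT citus_disable_node('localhost', 57637, force := true);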
@ -300,7 +300,7 @@ alter table local_fkey_table ALTER COLUMN id TYPE int;
SET citus.force_max_query_parallelization TO ON; SET citus.force_max_query_parallelization TO ON;
alter table distributed_table ALTER COLUMN value_1 TYPE bigint; alter table distributed_table ALTER COLUMN value_1 TYPE bigint;
alter table distributed_table ALTER COLUMN value_1 TYPE int; alter table distributed_table ALTER COLUMN value_1 TYPE int;
SET client_min_messages TO error; SET client_min_messages TO ERROR;
DROP TABLE distributed_table, referece_table, local_fkey_table; DROP TABLE distributed_table, referece_table, local_fkey_table;
SELECT master_remove_node('localhost', :master_port); SELECT master_remove_node('localhost', :master_port);
master_remove_node master_remove_node
@ -46,7 +46,7 @@ SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;
nodeid | nodename | nodeport | hasmetadata | metadatasynced nodeid | nodename | nodeport | hasmetadata | metadatasynced
--------------------------------------------------------------------- ---------------------------------------------------------------------
2 | localhost | 57637 | f | f 2 | localhost | 57637 | t | t
(1 row) (1 row)
-- create couple of tables -- create couple of tables
@ -83,7 +83,7 @@ SELECT 1 FROM master_update_node((SELECT nodeid FROM pg_dist_node),
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;
nodeid | nodename | nodeport | hasmetadata | metadatasynced nodeid | nodename | nodeport | hasmetadata | metadatasynced
--------------------------------------------------------------------- ---------------------------------------------------------------------
2 | localhost | 57638 | f | f 2 | localhost | 57638 | t | f
(1 row) (1 row)
-- start syncing metadata to the node -- start syncing metadata to the node
@ -641,10 +641,11 @@ SELECT verify_metadata('localhost', :worker_1_port),
--------------------------------------------------------------------- ---------------------------------------------------------------------
-- Don't drop the reference table so it has shards on the nodes being disabled -- Don't drop the reference table so it has shards on the nodes being disabled
DROP TABLE dist_table_1, dist_table_2; DROP TABLE dist_table_1, dist_table_2;
-SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
-?column?
+SELECT pg_catalog.citus_disable_node_and_wait('localhost', :worker_2_port);
+NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
+citus_disable_node_and_wait
---------------------------------------------------------------------
-1
(1 row)
SELECT verify_metadata('localhost', :worker_1_port); SELECT verify_metadata('localhost', :worker_1_port);
@ -666,7 +667,7 @@ SELECT verify_metadata('localhost', :worker_1_port);
(1 row) (1 row)
--------------------------------------------------------------------- ---------------------------------------------------------------------
-- Test master_disable_node() when the node that is being disabled is actually down -- Test citus_disable_node_and_wait() when the node that is being disabled is actually down
--------------------------------------------------------------------- ---------------------------------------------------------------------
SELECT master_update_node(:nodeid_2, 'localhost', 1); SELECT master_update_node(:nodeid_2, 'localhost', 1);
master_update_node master_update_node
@ -682,22 +683,9 @@ SELECT wait_until_metadata_sync(30000);
-- set metadatasynced so we try porpagating metadata changes -- set metadatasynced so we try porpagating metadata changes
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2);
--- should not error out, master_disable_node is tolerant for node failures
-SELECT 1 FROM master_disable_node('localhost', 1);
-?column?
----------------------------------------------------------------------
-1
-(1 row)
--- try again after stopping metadata sync
-SELECT stop_metadata_sync_to_node('localhost', 1);
-NOTICE: dropping metadata on the node (localhost,1)
-stop_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)
-SELECT 1 FROM master_disable_node('localhost', 1);
+-- should not error out, citus_disable_node is tolerant for node failures
+-- but we should not wait metadata syncing to finish as this node is down
+SELECT 1 FROM citus_disable_node('localhost', 1, true);
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
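Note: the rewritten expectation above covers disabling a node that is itself unreachable (the test points it at port 1): per the new comments, citus_disable_node() tolerates the failure, and no wait_until_metadata_sync() follows because syncing to a down node cannot finish. The call, as it appears in the test:
    -- node at localhost:1 is down; disable it and do not wait for metadata sync
    SELECT 1 FROM citus_disable_node('localhost', 1, true);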
@ -734,7 +722,7 @@ SELECT verify_metadata('localhost', :worker_1_port);
(1 row) (1 row)
--------------------------------------------------------------------- ---------------------------------------------------------------------
-- Test master_disable_node() when the other node is down -- Test citus_disable_node_and_wait() when the other node is down
--------------------------------------------------------------------- ---------------------------------------------------------------------
-- node 1 is down. -- node 1 is down.
SELECT master_update_node(:nodeid_1, 'localhost', 1); SELECT master_update_node(:nodeid_1, 'localhost', 1);
@ -751,9 +739,14 @@ SELECT wait_until_metadata_sync(30000);
-- set metadatasynced so we try propagating metadata changes -- set metadatasynced so we try propagating metadata changes
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2);
-- should error out -- should not error out, citus_disable_node is tolerant for node failures
SELECT 1 FROM master_disable_node('localhost', :worker_2_port); -- but we should not wait for metadata syncing to finish as this node is down
ERROR: Disabling localhost:xxxxx failed SELECT 1 FROM citus_disable_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
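A minimal editorial sketch (not part of the diff) of the two calling patterns exercised above, with illustrative ports and timeout: as the comments note, citus_disable_node no longer blocks on metadata syncing, so tests pair it with the wait helper only when the disabled node is reachable.

-- the node being disabled is unreachable: force the disable and skip the wait
SELECT 1 FROM citus_disable_node('localhost', 1, true);
-- the node is reachable: disable it, then block until the metadata change is synced
SELECT 1 FROM citus_disable_node('localhost', 57638, true);
SELECT public.wait_until_metadata_sync(30000);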
-- try again after stopping metadata sync -- try again after stopping metadata sync
SELECT stop_metadata_sync_to_node('localhost', 1); SELECT stop_metadata_sync_to_node('localhost', 1);
NOTICE: dropping metadata on the node (localhost,1) NOTICE: dropping metadata on the node (localhost,1)
@ -762,7 +755,7 @@ NOTICE: dropping metadata on the node (localhost,1)
(1 row) (1 row)
SELECT 1 FROM master_disable_node('localhost', :worker_2_port); SELECT 1 FROM citus_disable_node_and_wait('localhost', :worker_2_port);
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -87,8 +87,14 @@ SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group;
(1 row) (1 row)
-- make sure when we disable a secondary we don't remove any placements -- make sure when we disable a secondary we don't remove any placements
SELECT master_disable_node('localhost', 9001); SELECT citus_disable_node('localhost', 9001);
master_disable_node citus_disable_node
---------------------------------------------------------------------
(1 row)
SELECT public.wait_until_metadata_sync();
wait_until_metadata_sync
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
@ -247,8 +253,14 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
(1 row) (1 row)
-- try to disable the node before removing it (this used to crash) -- try to disable the node before removing it (this used to crash)
SELECT master_disable_node('localhost', :worker_2_port); SELECT citus_disable_node('localhost', :worker_2_port);
master_disable_node citus_disable_node
---------------------------------------------------------------------
(1 row)
SELECT public.wait_until_metadata_sync();
wait_until_metadata_sync
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
@ -916,8 +928,8 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
1 1
(1 row) (1 row)
-- test with master_disable_node -- test with citus_disable_node_and_wait
-- status before master_disable_node -- status before citus_disable_node_and_wait
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -966,13 +978,19 @@ ORDER BY shardid ASC;
(0 rows) (0 rows)
\c - - - :master_port \c - - - :master_port
SELECT master_disable_node('localhost', :worker_2_port); SELECT citus_disable_node('localhost', :worker_2_port);
master_disable_node citus_disable_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
-- status after master_disable_node SELECT public.wait_until_metadata_sync();
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
-- status after citus_disable_node_and_wait
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -21,6 +21,14 @@ ERROR: cannot calculate the size because relation 'non_distributed_table' is no
SELECT citus_total_relation_size('non_distributed_table'); SELECT citus_total_relation_size('non_distributed_table');
ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed
DROP TABLE non_distributed_table; DROP TABLE non_distributed_table;
-- fix broken placements via disabling the node
SET client_min_messages TO ERROR;
SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
replicate_table_shards
---------------------------------------------------------------------
(1 row)
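As an editorial aside (not part of the diff), one way the repair above could be double-checked, assuming the standard Citus catalogs; the relation name comes from the test itself:

-- hypothetical check: each shard of lineitem_hash_part should now have two placements
SELECT shardid, count(*) AS placements
FROM pg_dist_shard JOIN pg_dist_placement USING (shardid)
WHERE logicalrelid = 'lineitem_hash_part'::regclass
GROUP BY shardid ORDER BY shardid;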
-- Tests on distributed table with replication factor > 1 -- Tests on distributed table with replication factor > 1
VACUUM (FULL) lineitem_hash_part; VACUUM (FULL) lineitem_hash_part;
SELECT citus_table_size('lineitem_hash_part'); SELECT citus_table_size('lineitem_hash_part');
@ -67,7 +75,7 @@ SELECT citus_table_size('customer_copy_hash'),
citus_table_size('supplier'); citus_table_size('supplier');
citus_table_size | citus_table_size | citus_table_size citus_table_size | citus_table_size | citus_table_size
--------------------------------------------------------------------- ---------------------------------------------------------------------
548864 | 548864 | 401408 548864 | 548864 | 425984
(1 row) (1 row)
CREATE INDEX index_1 on customer_copy_hash(c_custkey); CREATE INDEX index_1 on customer_copy_hash(c_custkey);
@ -53,3 +53,13 @@ CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix
CREATE OR REPLACE FUNCTION top_transaction_context_size() RETURNS BIGINT CREATE OR REPLACE FUNCTION top_transaction_context_size() RETURNS BIGINT
LANGUAGE C STRICT VOLATILE LANGUAGE C STRICT VOLATILE
AS 'citus', $$top_transaction_context_size$$; AS 'citus', $$top_transaction_context_size$$;
CREATE OR REPLACE FUNCTION pg_catalog.citus_disable_node_and_wait(nodename text, nodeport integer, force bool DEFAULT false)
RETURNS void
LANGUAGE plpgsql
AS $function$
BEGIN
PERFORM pg_catalog.citus_disable_node(nodename, nodeport, force);
PERFORM public.wait_until_metadata_sync(30000);
END;
$function$;
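A usage sketch for the wrapper above (editorial, not part of the diff; the port is illustrative). It simply forwards its arguments to citus_disable_node and then blocks on wait_until_metadata_sync, so a test that needs the disable to be fully propagated before continuing can do it in one call:

-- disable a worker and wait for the metadata change to reach the other nodes
SELECT pg_catalog.citus_disable_node_and_wait('localhost', 57638);
-- the optional third argument is forwarded to citus_disable_node as its force flag
SELECT pg_catalog.citus_disable_node_and_wait('localhost', 57638, force := true);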
@ -0,0 +1,132 @@
CREATE SCHEMA disable_node_with_replicated_tables;
SET search_path TO disable_node_with_replicated_tables;
SET citus.next_shard_id TO 101500;
SET citus.shard_replication_factor TO 2;
CREATE TABLE replicated(a int, b int);
SELECT create_distributed_table('replicated', 'a', shard_count:=2);
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE ref (a int, b int);
SELECT create_reference_table('ref');
create_reference_table
---------------------------------------------------------------------
(1 row)
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- should successfully disable the node
SELECT citus_disable_node('localhost', :worker_2_port, true);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
citus_disable_node
---------------------------------------------------------------------
(1 row)
SELECT public.wait_until_metadata_sync();
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
-- the placement should be removed both from the coordinator
-- and from the workers
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);
count
---------------------------------------------------------------------
3
(1 row)
\c - - - :worker_1_port
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);
count
---------------------------------------------------------------------
3
(1 row)
SET search_path TO disable_node_with_replicated_tables;
-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
\c - - - :master_port
SET search_path TO disable_node_with_replicated_tables;
-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- now, query with round-robin policy such that
-- each query should hit different replicas
SET citus.task_assignment_policy to "round-robin";
SELECT count(*) FROM ref;
count
---------------------------------------------------------------------
33
(1 row)
SELECT count(*) FROM ref;
count
---------------------------------------------------------------------
33
(1 row)
SELECT count(*) FROM replicated;
count
---------------------------------------------------------------------
33
(1 row)
SELECT count(*) FROM replicated;
count
---------------------------------------------------------------------
33
(1 row)
-- now, we should be able to replicate the shards back
SET client_min_messages TO ERROR;
SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT 1 FROM replicate_table_shards('replicated', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- now, query with round-robin policy such that
-- each query should hit different replicas
SET citus.task_assignment_policy to "round-robin";
SELECT count(*) FROM ref;
count
---------------------------------------------------------------------
44
(1 row)
SELECT count(*) FROM ref;
count
---------------------------------------------------------------------
44
(1 row)
SELECT count(*) FROM replicated;
count
---------------------------------------------------------------------
44
(1 row)
SELECT count(*) FROM replicated;
count
---------------------------------------------------------------------
44
(1 row)
SET client_min_messages TO ERROR;
DROP SCHEMA disable_node_with_replicated_tables CASCADE;
@ -779,14 +779,10 @@ CREATE TABLE test_schema_support.nation_hash (
n_regionkey integer not null, n_regionkey integer not null,
n_comment varchar(152) n_comment varchar(152)
); );
SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash'); SET citus.shard_count TO 4;
master_create_distributed_table SET citus.shard_replication_factor TO 1;
--------------------------------------------------------------------- SELECT create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash');
create_distributed_table
(1 row)
SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 1);
master_create_worker_shards
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
@ -797,18 +793,15 @@ CREATE TABLE test_schema_support.nation_hash2 (
n_regionkey integer not null, n_regionkey integer not null,
n_comment varchar(152) n_comment varchar(152)
); );
SELECT master_create_distributed_table('test_schema_support.nation_hash2', 'n_nationkey', 'hash'); SELECT create_distributed_table('test_schema_support.nation_hash2', 'n_nationkey', 'hash');
master_create_distributed_table create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('test_schema_support.nation_hash2', 4, 1);
master_create_worker_shards
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
-- Mark tables as coordinator replicated in order to be able to test replicate_table_shards
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('test_schema_support.nation_hash2'::regclass, 'test_schema_support.nation_hash'::regclass);
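An editorial sketch (not part of the diff) of how the effect of the UPDATE above could be verified; 'c' is the replication model value the test sets:

-- hypothetical check: both tables should now report repmodel 'c'
SELECT logicalrelid, repmodel
FROM pg_dist_partition
WHERE logicalrelid IN ('test_schema_support.nation_hash'::regclass,
                       'test_schema_support.nation_hash2'::regclass);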
-- Shard count before replication -- Shard count before replication
SELECT COUNT(*) FROM pg_dist_shard_placement; SELECT COUNT(*) FROM pg_dist_shard_placement;
count count
@ -817,22 +810,22 @@ SELECT COUNT(*) FROM pg_dist_shard_placement;
(1 row) (1 row)
SET search_path TO public; SET search_path TO public;
SELECT replicate_table_shards('test_schema_support.nation_hash', shard_transfer_mode:='block_writes'); SELECT replicate_table_shards('test_schema_support.nation_hash', shard_replication_factor:=2, max_shard_copies:=1, shard_transfer_mode:='block_writes');
replicate_table_shards replicate_table_shards
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
-- Confirm replication -- Confirm replication, both tables replicated due to colocation
SELECT COUNT(*) FROM pg_dist_shard_placement; SELECT COUNT(*) FROM pg_dist_shard_placement;
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
12 10
(1 row) (1 row)
-- Test with search_path is set -- Test with search_path is set
SET search_path TO test_schema_support; SET search_path TO test_schema_support;
SELECT replicate_table_shards('nation_hash2', shard_transfer_mode:='block_writes'); SELECT replicate_table_shards('nation_hash2', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
replicate_table_shards replicate_table_shards
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1073,14 +1066,14 @@ CALL citus_cleanup_orphaned_shards();
select * from pg_dist_placement ORDER BY placementid; select * from pg_dist_placement ORDER BY placementid;
placementid | shardid | shardstate | shardlength | groupid placementid | shardid | shardstate | shardlength | groupid
--------------------------------------------------------------------- ---------------------------------------------------------------------
151 | 123023 | 1 | 0 | 14 150 | 123023 | 1 | 0 | 14
154 | 123024 | 1 | 0 | 14 153 | 123024 | 1 | 0 | 14
157 | 123027 | 1 | 0 | 14 156 | 123027 | 1 | 0 | 14
158 | 123028 | 1 | 0 | 14 157 | 123028 | 1 | 0 | 14
159 | 123021 | 1 | 0 | 16 158 | 123021 | 1 | 0 | 16
160 | 123025 | 1 | 0 | 16 159 | 123025 | 1 | 0 | 16
161 | 123022 | 1 | 0 | 16 160 | 123022 | 1 | 0 | 16
162 | 123026 | 1 | 0 | 16 161 | 123026 | 1 | 0 | 16
(8 rows) (8 rows)
-- Move all shards to worker1 again -- Move all shards to worker1 again
@ -2042,6 +2035,12 @@ SELECT 1 FROM master_remove_node('localhost', :master_port);
1 1
(1 row) (1 row)
SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
-- --
-- Make sure that rebalance_table_shards() and replicate_table_shards() replicate -- Make sure that rebalance_table_shards() and replicate_table_shards() replicate
-- reference tables to the coordinator when replicate_reference_tables_on_activate -- reference tables to the coordinator when replicate_reference_tables_on_activate
@ -2101,6 +2100,12 @@ SELECT 1 FROM master_remove_node('localhost', :master_port);
1 1
(1 row) (1 row)
SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
CREATE TABLE rebalance_test_table(int_column int); CREATE TABLE rebalance_test_table(int_column int);
SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append'); SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append');
master_create_distributed_table master_create_distributed_table
@ -2141,6 +2146,12 @@ SELECT 1 FROM master_remove_node('localhost', :master_port);
1 1
(1 row) (1 row)
SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
-- reference table 2 will not have a replica identity, causing the rebalancer to not work -- reference table 2 will not have a replica identity, causing the rebalancer to not work
-- when ran in the default mode. Instead we need to change the shard transfer mode to make -- when ran in the default mode. Instead we need to change the shard transfer mode to make
-- it work. This verifies the shard transfer mode used in the rebalancer is used for the -- it work. This verifies the shard transfer mode used in the rebalancer is used for the
@ -2156,6 +2167,12 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port);
1 1
(1 row) (1 row)
SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('t1','a'); SELECT create_distributed_table('t1','a');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -2207,6 +2224,12 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port);
1 1
(1 row) (1 row)
SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
CREATE TABLE r1 (a int PRIMARY KEY, b int); CREATE TABLE r1 (a int PRIMARY KEY, b int);
SELECT create_reference_table('r1'); SELECT create_reference_table('r1');
create_reference_table create_reference_table
@ -2260,6 +2283,12 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port);
1 1
(1 row) (1 row)
SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
CREATE TABLE t1 (a int PRIMARY KEY, b int); CREATE TABLE t1 (a int PRIMARY KEY, b int);
CREATE TABLE r1 (a int PRIMARY KEY, b int); CREATE TABLE r1 (a int PRIMARY KEY, b int);
SELECT create_distributed_table('t1', 'a'); SELECT create_distributed_table('t1', 'a');
@ -17,9 +17,8 @@ SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
(1 row) (1 row)
-- coordinator cannot be disabled -- coordinator cannot be disabled
SELECT 1 FROM master_disable_node('localhost', :master_port); SELECT 1 FROM citus_disable_node('localhost', :master_port);
ERROR: Disabling localhost:xxxxx failed ERROR: cannot change "isactive" field of the coordinator node
DETAIL: cannot change "isactive" field of the coordinator node
RESET client_min_messages; RESET client_min_messages;
SELECT 1 FROM master_remove_node('localhost', :master_port); SELECT 1 FROM master_remove_node('localhost', :master_port);
?column? ?column?
@ -405,45 +405,6 @@ SELECT hasmetadata, metadatasynced, shouldhaveshards FROM pg_dist_node WHERE nod
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
\c - - - :master_port
-- verify that mx workers are updated when disabling/activating nodes
SELECT citus_disable_node('localhost', :worker_1_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back.
citus_disable_node
---------------------------------------------------------------------
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
\c - - - :worker_2_port
SELECT nodeport, isactive FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;
nodeport | isactive
---------------------------------------------------------------------
57637 | f
57638 | t
(2 rows)
\c - - - :master_port
SET client_min_messages TO ERROR;
SELECT citus_activate_node('localhost', :worker_1_port);
citus_activate_node
---------------------------------------------------------------------
16
(1 row)
\c - - - :worker_2_port
SELECT nodeport, isactive FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;
nodeport | isactive
---------------------------------------------------------------------
57637 | t
57638 | t
(2 rows)
\c - - - :master_port \c - - - :master_port
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
NOTICE: dropping metadata on the node (localhost,57638) NOTICE: dropping metadata on the node (localhost,57638)
@ -42,7 +42,7 @@ ORDER BY 1;
function citus_conninfo_cache_invalidate() function citus_conninfo_cache_invalidate()
function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode) function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
function citus_create_restore_point(text) function citus_create_restore_point(text)
function citus_disable_node(text,integer) function citus_disable_node(text,integer,boolean)
function citus_dist_local_group_cache_invalidate() function citus_dist_local_group_cache_invalidate()
function citus_dist_node_cache_invalidate() function citus_dist_node_cache_invalidate()
function citus_dist_object_cache_invalidate() function citus_dist_object_cache_invalidate()
@ -17,12 +17,8 @@ CREATE TABLE customer_copy_hash (
c_mktsegment char(10), c_mktsegment char(10),
c_comment varchar(117), c_comment varchar(117),
primary key (c_custkey)); primary key (c_custkey));
SELECT master_create_distributed_table('customer_copy_hash', 'c_custkey', 'hash'); SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('customer_copy_hash', 'c_custkey', shard_count:=64);
-- Test COPY into empty hash-partitioned table
COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|');
SELECT master_create_worker_shards('customer_copy_hash', 64, 1);
-- Test empty copy -- Test empty copy
COPY customer_copy_hash FROM STDIN; COPY customer_copy_hash FROM STDIN;
@ -123,10 +119,8 @@ CREATE TABLE customer_with_default(
c_custkey integer, c_custkey integer,
c_name varchar(25) not null, c_name varchar(25) not null,
c_time timestamp default now()); c_time timestamp default now());
SET citus.shard_replication_factor TO 1;
SELECT master_create_distributed_table('customer_with_default', 'c_custkey', 'hash'); SELECT create_distributed_table('customer_with_default', 'c_custkey', shard_count:=64);
SELECT master_create_worker_shards('customer_with_default', 64, 1);
-- Test with default values for now() function -- Test with default values for now() function
COPY customer_with_default (c_custkey, c_name) FROM STDIN COPY customer_with_default (c_custkey, c_name) FROM STDIN
@ -221,6 +215,7 @@ CREATE TABLE customer_copy_append (
c_mktsegment char(10), c_mktsegment char(10),
c_comment varchar(117)); c_comment varchar(117));
SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append'); SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append');
SET citus.shard_replication_factor TO 2;
-- Test syntax error -- Test syntax error
BEGIN; BEGIN;
@ -444,6 +439,7 @@ COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_s
-- Test copy on append distributed tables do not create shards on removed workers -- Test copy on append distributed tables do not create shards on removed workers
SET citus.shard_replication_factor TO 2;
CREATE TABLE numbers_append (a int, b int); CREATE TABLE numbers_append (a int, b int);
SELECT create_distributed_table('numbers_append', 'a', 'append'); SELECT create_distributed_table('numbers_append', 'a', 'append');
@ -470,7 +466,14 @@ SELECT shardid, nodename, nodeport
WHERE logicalrelid = 'numbers_append'::regclass order by placementid; WHERE logicalrelid = 'numbers_append'::regclass order by placementid;
-- disable the first node -- disable the first node
SET client_min_messages TO ERROR;
\set VERBOSITY terse
SELECT master_disable_node('localhost', :worker_1_port); SELECT master_disable_node('localhost', :worker_1_port);
SELECT public.wait_until_metadata_sync(30000);
RESET client_min_messages;
\set VERBOSITY default
-- set replication factor to 1 so that copy will -- set replication factor to 1 so that copy will
-- succeed without replication count error -- succeed without replication count error
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -525,7 +528,7 @@ SELECT * FROM run_command_on_workers('CREATE USER test_user');
\c - test_user \c - test_user
SET citus.shard_count to 4; SET citus.shard_count to 4;
CREATE TABLE numbers_hash (a int, b int); CREATE TABLE numbers_hash (a int, b int);
SELECT create_distributed_table('numbers_hash', 'a'); SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');
COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); COPY numbers_hash FROM STDIN WITH (FORMAT 'csv');
1,1 1,1
@ -553,7 +556,7 @@ COPY numbers_reference FROM STDIN WITH (FORMAT 'csv');
-- create another hash distributed table -- create another hash distributed table
CREATE TABLE numbers_hash_other(a int, b int); CREATE TABLE numbers_hash_other(a int, b int);
SELECT create_distributed_table('numbers_hash_other', 'a'); SELECT create_distributed_table('numbers_hash_other', 'a', colocate_with:='numbers_hash');
SELECT shardid, shardstate, nodename, nodeport SELECT shardid, shardstate, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid) FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_hash_other'::regclass order by shardid, nodeport; WHERE logicalrelid = 'numbers_hash_other'::regclass order by shardid, nodeport;
@ -631,8 +634,9 @@ DROP TABLE numbers_reference;
-- test copy failure inside the node -- test copy failure inside the node
-- it will be done by changing definition of a shard table -- it will be done by changing definition of a shard table
SET citus.shard_count to 4; SET citus.shard_count to 4;
SET citus.next_shard_id TO 560170;
CREATE TABLE numbers_hash(a int, b int); CREATE TABLE numbers_hash(a int, b int);
SELECT create_distributed_table('numbers_hash', 'a'); SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');
\c - - - :worker_1_port \c - - - :worker_1_port
ALTER TABLE numbers_hash_560170 DROP COLUMN b; ALTER TABLE numbers_hash_560170 DROP COLUMN b;
@ -2,8 +2,8 @@ test: isolation_add_remove_node
test: isolation_turn_mx_off test: isolation_turn_mx_off
test: isolation_update_node test: isolation_update_node
test: isolation_update_node_lock_writes test: isolation_update_node_lock_writes
test: isolation_ensure_dependency_activate_node
test: isolation_turn_mx_on test: isolation_turn_mx_on
test: isolation_ensure_dependency_activate_node
test: isolation_add_node_vs_reference_table_operations test: isolation_add_node_vs_reference_table_operations
test: isolation_create_table_vs_add_remove_node test: isolation_create_table_vs_add_remove_node
test: isolation_master_update_node test: isolation_master_update_node
@ -20,6 +20,7 @@ test: multi_extension
test: single_node test: single_node
test: single_node_truncate test: single_node_truncate
test: turn_mx_on test: turn_mx_on
test: multi_test_helpers multi_test_helpers_superuser
test: multi_cluster_management test: multi_cluster_management
# below tests are placed right after multi_cluster_management as we do # below tests are placed right after multi_cluster_management as we do
@ -30,7 +31,6 @@ test: escape_extension_name
test: ref_citus_local_fkeys test: ref_citus_local_fkeys
test: alter_database_owner test: alter_database_owner
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views test: multi_test_catalog_views
test: check_mx test: check_mx
test: turn_mx_off test: turn_mx_off
@ -16,8 +16,8 @@
test: turn_mx_off test: turn_mx_off
test: multi_extension test: multi_extension
test: multi_test_helpers multi_test_helpers_superuser test: multi_test_helpers multi_test_helpers_superuser
test: multi_mx_node_metadata
test: turn_mx_on test: turn_mx_on
test: multi_mx_node_metadata
test: multi_cluster_management test: multi_cluster_management
test: multi_mx_function_table_reference test: multi_mx_function_table_reference
test: multi_test_catalog_views test: multi_test_catalog_views
@ -1,6 +1,7 @@
test: multi_cluster_management
test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw
test: multi_cluster_management
test: multi_test_catalog_views test: multi_test_catalog_views
test: replicated_table_disable_node
# ---------- # ----------
# The following distributed tests depend on creating a partitioned table and # The following distributed tests depend on creating a partitioned table and
@ -1,9 +1,10 @@
test: multi_cluster_management
test: turn_mx_off
test: multi_test_helpers multi_test_helpers_superuser test: multi_test_helpers multi_test_helpers_superuser
test: multi_cluster_management
test: multi_test_catalog_views test: multi_test_catalog_views
test: shard_rebalancer_unit test: shard_rebalancer_unit
test: turn_mx_off
test: shard_rebalancer test: shard_rebalancer
test: turn_mx_on
test: foreign_key_to_reference_shard_rebalance test: foreign_key_to_reference_shard_rebalance
test: multi_move_mx test: multi_move_mx
test: shard_move_deferred_delete test: shard_move_deferred_delete
@ -13,19 +13,9 @@ CREATE TABLE customer_copy_hash (
c_mktsegment char(10), c_mktsegment char(10),
c_comment varchar(117), c_comment varchar(117),
primary key (c_custkey)); primary key (c_custkey));
SELECT master_create_distributed_table('customer_copy_hash', 'c_custkey', 'hash'); SET citus.shard_replication_factor TO 1;
master_create_distributed_table SELECT create_distributed_table('customer_copy_hash', 'c_custkey', shard_count:=64);
--------------------------------------------------------------------- create_distributed_table
(1 row)
-- Test COPY into empty hash-partitioned table
COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|');
ERROR: could not find any shards into which to copy
DETAIL: No shards exist for distributed table "customer_copy_hash".
HINT: Run master_create_worker_shards to create shards and try again.
SELECT master_create_worker_shards('customer_copy_hash', 64, 1);
master_create_worker_shards
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
@ -146,14 +136,9 @@ CREATE TABLE customer_with_default(
c_custkey integer, c_custkey integer,
c_name varchar(25) not null, c_name varchar(25) not null,
c_time timestamp default now()); c_time timestamp default now());
SELECT master_create_distributed_table('customer_with_default', 'c_custkey', 'hash'); SET citus.shard_replication_factor TO 1;
master_create_distributed_table SELECT create_distributed_table('customer_with_default', 'c_custkey', shard_count:=64);
--------------------------------------------------------------------- create_distributed_table
(1 row)
SELECT master_create_worker_shards('customer_with_default', 64, 1);
master_create_worker_shards
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
@ -246,8 +231,7 @@ SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_sh
shardid | shardlength shardid | shardlength
--------------------------------------------------------------------- ---------------------------------------------------------------------
560129 | 0 560129 | 0
560129 | 0 (1 row)
(2 rows)
-- Update shard statistics for range-partitioned shard -- Update shard statistics for range-partitioned shard
SELECT citus_update_shard_statistics(:new_shard_id); SELECT citus_update_shard_statistics(:new_shard_id);
@ -260,8 +244,7 @@ SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_sh
shardid | shardlength shardid | shardlength
--------------------------------------------------------------------- ---------------------------------------------------------------------
560129 | 131072 560129 | 131072
560129 | 131072 (1 row)
(2 rows)
-- Create a new append-partitioned table into which to COPY -- Create a new append-partitioned table into which to COPY
CREATE TABLE customer_copy_append ( CREATE TABLE customer_copy_append (
@ -279,6 +262,7 @@ SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append');
(1 row) (1 row)
SET citus.shard_replication_factor TO 2;
-- Test syntax error -- Test syntax error
BEGIN; BEGIN;
SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
@ -574,6 +558,7 @@ SELECT create_distributed_table('composite_partition_column_table', 'composite_c
SELECT master_create_empty_shard('composite_partition_column_table') AS shardid \gset SELECT master_create_empty_shard('composite_partition_column_table') AS shardid \gset
COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid); COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
-- Test copy on append distributed tables do not create shards on removed workers -- Test copy on append distributed tables do not create shards on removed workers
SET citus.shard_replication_factor TO 2;
CREATE TABLE numbers_append (a int, b int); CREATE TABLE numbers_append (a int, b int);
SELECT create_distributed_table('numbers_append', 'a', 'append'); SELECT create_distributed_table('numbers_append', 'a', 'append');
create_distributed_table create_distributed_table
@ -606,13 +591,18 @@ SELECT shardid, nodename, nodeport
(4 rows) (4 rows)
-- disable the first node -- disable the first node
SET client_min_messages TO ERROR;
\set VERBOSITY terse
SELECT master_disable_node('localhost', :worker_1_port); SELECT master_disable_node('localhost', :worker_1_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back. ERROR: cannot remove or disable the node localhost:xxxxx because it contains the only shard placement for shard xxxxx
master_disable_node SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
RESET client_min_messages;
\set VERBOSITY default
-- set replication factor to 1 so that copy will -- set replication factor to 1 so that copy will
-- succeed without replication count error -- succeed without replication count error
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -630,7 +620,7 @@ SELECT shardid, nodename, nodeport
560155 | localhost | 57638 560155 | localhost | 57638
560156 | localhost | 57638 560156 | localhost | 57638
560156 | localhost | 57637 560156 | localhost | 57637
560157 | localhost | 57638 560157 | localhost | 57637
560158 | localhost | 57638 560158 | localhost | 57638
(6 rows) (6 rows)
@ -658,7 +648,7 @@ SELECT shardid, nodename, nodeport
560155 | localhost | 57638 560155 | localhost | 57638
560156 | localhost | 57638 560156 | localhost | 57638
560156 | localhost | 57637 560156 | localhost | 57637
560157 | localhost | 57638 560157 | localhost | 57637
560158 | localhost | 57638 560158 | localhost | 57638
560159 | localhost | 57637 560159 | localhost | 57637
560159 | localhost | 57638 560159 | localhost | 57638
@ -682,7 +672,7 @@ SELECT * FROM run_command_on_workers('CREATE USER test_user');
\c - test_user \c - test_user
SET citus.shard_count to 4; SET citus.shard_count to 4;
CREATE TABLE numbers_hash (a int, b int); CREATE TABLE numbers_hash (a int, b int);
SELECT create_distributed_table('numbers_hash', 'a'); SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -716,7 +706,7 @@ SELECT create_reference_table('numbers_reference');
COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); COPY numbers_reference FROM STDIN WITH (FORMAT 'csv');
-- create another hash distributed table -- create another hash distributed table
CREATE TABLE numbers_hash_other(a int, b int); CREATE TABLE numbers_hash_other(a int, b int);
SELECT create_distributed_table('numbers_hash_other', 'a'); SELECT create_distributed_table('numbers_hash_other', 'a', colocate_with:='numbers_hash');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -817,8 +807,9 @@ DROP TABLE numbers_reference;
-- test copy failure inside the node -- test copy failure inside the node
-- it will be done by changing definition of a shard table -- it will be done by changing definition of a shard table
SET citus.shard_count to 4; SET citus.shard_count to 4;
SET citus.next_shard_id TO 560170;
CREATE TABLE numbers_hash(a int, b int); CREATE TABLE numbers_hash(a int, b int);
SELECT create_distributed_table('numbers_hash', 'a'); SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1,6 +1,10 @@
setup setup
{ {
SELECT 1; SELECT 1;
CREATE OR REPLACE FUNCTION public.wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';
} }
teardown teardown
@ -38,6 +42,7 @@ step "s1-activate-node-1"
step "s1-disable-node-1" step "s1-disable-node-1"
{ {
SELECT 1 FROM master_disable_node('localhost', 57637); SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT public.wait_until_metadata_sync();
} }
step "s1-remove-node-1" step "s1-remove-node-1"
@ -80,6 +85,7 @@ step "s2-activate-node-1"
step "s2-disable-node-1" step "s2-disable-node-1"
{ {
SELECT 1 FROM master_disable_node('localhost', 57637); SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT public.wait_until_metadata_sync();
} }
step "s2-remove-node-1" step "s2-remove-node-1"
@ -128,7 +134,5 @@ permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-activate-node
// activate and disable an inactive node from 2 transactions, should be ok // activate and disable an inactive node from 2 transactions, should be ok
permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes"
// activate and disable an inactive node from 2 transactions, one aborts
permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes"
// disable an active node from 2 transactions, one aborts // disable an active node from 2 transactions, one aborts
permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes" permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes"
@ -25,6 +25,7 @@ session "s1"
step "s1-begin" step "s1-begin"
{ {
BEGIN; BEGIN;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
} }
step "s1-update" step "s1-update"
@ -42,6 +43,7 @@ session "s2"
step "s2-begin" step "s2-begin"
{ {
BEGIN; BEGIN;
SELECT assign_distributed_transaction_id(0, 9, '2021-07-09 15:41:55.542377+02');
} }
step "s2-update" step "s2-update"
@ -59,6 +61,7 @@ session "s3"
step "s3-begin" step "s3-begin"
{ {
BEGIN; BEGIN;
SELECT assign_distributed_transaction_id(0, 10, '2021-07-09 15:41:55.542377+02');
} }
step "s3-update" step "s3-update"
@ -2,10 +2,10 @@
// add a single one of the nodes for the purpose of the test // add a single one of the nodes for the purpose of the test
setup setup
{ {
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER) CREATE OR REPLACE FUNCTION public.wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void RETURNS void
LANGUAGE C STRICT VOLATILE LANGUAGE C STRICT
AS 'citus', $$wait_until_metadata_sync$$; AS 'citus';
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57637);
@ -17,12 +17,6 @@ teardown
SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('localhost', 57638); SELECT 1 FROM master_add_node('localhost', 57638);
-- schema drops are not cascaded
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
DROP SCHEMA IF EXISTS myschema CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
DROP SCHEMA IF EXISTS myschema2 CASCADE;
RESET search_path; RESET search_path;
DROP TABLE IF EXISTS t1 CASCADE; DROP TABLE IF EXISTS t1 CASCADE;
DROP TABLE IF EXISTS t2 CASCADE; DROP TABLE IF EXISTS t2 CASCADE;
@ -182,34 +176,52 @@ step "s3-commit"
COMMIT; COMMIT;
} }
step "s3-drop-coordinator-schemas"
{
-- schema drops are not cascaded
-- and cannot be dropped in a single
-- transaction in teardown
-- because it'd self-deadlock
-- instead we drop the schemas
-- at the end of the permutations
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
}
step "s3-drop-worker-schemas"
{
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
}
// schema only tests // schema only tests
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
// concurrency tests with multi schema distribution // concurrency tests with multi schema distribution
permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-table" "s3-use-schema" "s3-create-table" "s1-commit" "s2-commit" "s3-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s3-commit" "s2-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s3-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
// type and schema tests // type and schema tests
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
// distributed function tests // distributed function tests
// isolation tests are not very simple psql, so trigger NOTIFY reliably for // isolation tests are not very simple psql, so trigger NOTIFY reliably for
// s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by // s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by
// "s2-commit", because "COMMIT" syncs the messages // "s2-commit", because "COMMIT" syncs the messages
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
// we cannot run the following operations concurrently // we cannot run the following operations concurrently
// the problem is that NOTIFY event doesn't (reliably) happen before COMMIT // the problem is that NOTIFY event doesn't (reliably) happen before COMMIT
// so we have to commit s2 before s1 starts // so we have to commit s2 before s1 starts
permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas"
@ -3,13 +3,13 @@ setup
SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('localhost', 57638); SELECT 1 FROM master_add_node('localhost', 57638);
SELECT nodeid, nodename, nodeport from pg_dist_node; SELECT nodeid, nodename, nodeport from pg_dist_node ORDER BY 1 DESC;
} }
teardown teardown
{ {
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
SELECT nodeid, nodename, nodeport from pg_dist_node; SELECT nodeid, nodename, nodeport from pg_dist_node ORDER BY 1 DESC;
} }
session "s1" session "s1"
@ -1,7 +1,7 @@
-- --
-- failure_add_disable_node tests master_add_node, master_remove_node -- failure_add_disable_node tests master_add_node, master_remove_node
-- master_activate_node for failures. -- master_activate_node for failures.
-- master_disable_node and master_add_inactive_node can not be -- citus_disable_node_and_wait and master_add_inactive_node can not be
-- tested as they don't create network activity -- tested as they don't create network activity
-- --
@ -30,7 +30,8 @@ FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
WHERE s.logicalrelid = 'user_table'::regclass WHERE s.logicalrelid = 'user_table'::regclass
ORDER BY placementid; ORDER BY placementid;
SELECT master_disable_node('localhost', :worker_2_proxy_port); SELECT citus_disable_node('localhost', :worker_2_proxy_port, true);
SELECT public.wait_until_metadata_sync();
SELECT * FROM master_get_active_worker_nodes() SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2; ORDER BY 1, 2;
@ -6,6 +6,11 @@ ALTER SYSTEM SET citus.recover_2pc_interval TO -1;
ALTER SYSTEM set citus.enable_statistics_collection TO false; ALTER SYSTEM set citus.enable_statistics_collection TO false;
SELECT pg_reload_conf(); SELECT pg_reload_conf();
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';
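An editorial usage sketch of the helper declared above (not part of the diff); the timeout appears to be in milliseconds, matching the 15000 default here and the 30000 used elsewhere in these tests:

-- block until node metadata has been synced, giving up after roughly 30 seconds
SELECT wait_until_metadata_sync(30000);
-- or rely on the default timeout declared above
SELECT wait_until_metadata_sync();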
-- Add some helper functions for sending commands to mitmproxy -- Add some helper functions for sending commands to mitmproxy
CREATE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$ CREATE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$
@ -24,9 +24,11 @@ SELECT master_remove_node('localhost', :worker_2_port);
-- verify that the node has been deleted -- verify that the node has been deleted
SELECT master_get_active_worker_nodes(); SELECT master_get_active_worker_nodes();
-- try to disable a node with no placements see that node is removed -- try to disable a node with no placements and see that the node is removed
SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT master_disable_node('localhost', :worker_2_port);
SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync();
SELECT master_get_active_worker_nodes(); SELECT master_get_active_worker_nodes();
-- add some shard placements to the cluster -- add some shard placements to the cluster
@ -48,6 +50,7 @@ TRUNCATE pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000;
SELECT * FROM citus_activate_node('localhost', :worker_2_port); SELECT * FROM citus_activate_node('localhost', :worker_2_port);
CREATE TABLE cluster_management_test (col_1 text, col_2 int); CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
@ -58,7 +61,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER
SELECT master_remove_node('localhost', :worker_2_port); SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_get_active_worker_nodes(); SELECT master_get_active_worker_nodes();
-- insert a row so that master_disable_node() exercises closing connections -- insert a row so that citus_disable_node() exercises closing connections
CREATE TABLE test_reference_table (y int primary key, name text); CREATE TABLE test_reference_table (y int primary key, name text);
SELECT create_reference_table('test_reference_table'); SELECT create_reference_table('test_reference_table');
INSERT INTO test_reference_table VALUES (1, '1'); INSERT INTO test_reference_table VALUES (1, '1');
@ -66,16 +69,19 @@ INSERT INTO test_reference_table VALUES (1, '1');
-- try to remove a node with active placements and reference tables -- try to remove a node with active placements and reference tables
SELECT citus_remove_node('localhost', :worker_2_port); SELECT citus_remove_node('localhost', :worker_2_port);
-- try to disable a node with active placements see that node is removed -- try to disable a node with active placements
-- observe that a notification is displayed -- which should fail because there are some placements
SELECT master_disable_node('localhost', :worker_2_port); -- which are the only placements for a given shard
SELECT citus_disable_node('localhost', :worker_2_port);
SELECT master_get_active_worker_nodes(); SELECT master_get_active_worker_nodes();
-- try to disable a node which does not exist and see that an error is thrown -- try to disable a node which does not exist and see that an error is thrown
SELECT master_disable_node('localhost.noexist', 2345); SELECT citus_disable_node('localhost.noexist', 2345);
-- drop the table without leaving a shard placement behind (messes up other tests) -- drop the table without leaving a shard placement behind (messes up other tests)
SELECT master_activate_node('localhost', :worker_2_port); SELECT master_activate_node('localhost', :worker_2_port);
DROP TABLE test_reference_table, cluster_management_test; DROP TABLE test_reference_table, cluster_management_test;
-- create users like this so results of community and enterprise are same -- create users like this so results of community and enterprise are same
@ -90,7 +96,8 @@ GRANT EXECUTE ON FUNCTION master_activate_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_secondary_node(text,int,text,int,name) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_secondary_node(text,int,text,int,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_disable_node(text,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION citus_disable_node(text,int,bool) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION citus_disable_node_and_wait(text,int,bool) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_remove_node(text,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_remove_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_metadata_user;
@ -107,7 +114,7 @@ DELETE FROM citus.pg_dist_object WHERE objid = 'public'::regnamespace::oid;
SET ROLE non_super_user; SET ROLE non_super_user;
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1); SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1); SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1); SELECT 1 FROM citus_disable_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1); SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1); SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port); SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
@ -119,7 +126,6 @@ SET citus.enable_object_propagation TO off; -- prevent master activate node to a
BEGIN; BEGIN;
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port); SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port);
SELECT 1 FROM master_activate_node('localhost', :worker_2_port); SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
SELECT 1 FROM master_remove_node('localhost', :worker_2_port); SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port); SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
@ -316,7 +322,8 @@ SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_g
-- check that you can add a secondary to a non-default cluster, and activate it, and remove it -- check that you can add a secondary to a non-default cluster, and activate it, and remove it
SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary');
SELECT master_activate_node('localhost', 9999); SELECT master_activate_node('localhost', 9999);
SELECT master_disable_node('localhost', 9999); SELECT citus_disable_node('localhost', 9999);
SELECT public.wait_until_metadata_sync();
SELECT master_remove_node('localhost', 9999); SELECT master_remove_node('localhost', 9999);
-- check that you can't manually add two primaries to a group -- check that you can't manually add two primaries to a group

View File

@ -745,8 +745,8 @@ SELECT create_reference_table('dist_table_2');
ALTER TABLE dist_table_1 ADD COLUMN b int; ALTER TABLE dist_table_1 ADD COLUMN b int;
SELECT master_add_node('localhost', :master_port, groupid => 0); SELECT master_add_node('localhost', :master_port, groupid => 0);
SELECT master_disable_node('localhost', :worker_1_port); SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
SELECT master_disable_node('localhost', :worker_2_port); SELECT citus_disable_node_and_wait('localhost', :worker_2_port);
SELECT master_remove_node('localhost', :worker_1_port); SELECT master_remove_node('localhost', :worker_1_port);
SELECT master_remove_node('localhost', :worker_2_port); SELECT master_remove_node('localhost', :worker_2_port);

View File

@ -98,7 +98,6 @@ SELECT r.a FROM ref r JOIN local_table lt on r.a = lt.a;
\c - - - :master_port \c - - - :master_port
SET search_path TO mx_add_coordinator,public; SET search_path TO mx_add_coordinator,public;
SELECT stop_metadata_sync_to_node('localhost', :master_port); SELECT stop_metadata_sync_to_node('localhost', :master_port);
SELECT * FROM ref ORDER BY a; SELECT * FROM ref ORDER BY a;
@ -117,7 +116,6 @@ SELECT create_reference_table('referece_table');
CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int); CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('distributed_table', 'id'); SELECT create_distributed_table('distributed_table', 'id');
INSERT INTO local_fkey_table SELECT i FROM generate_Series(0,100)i; INSERT INTO local_fkey_table SELECT i FROM generate_Series(0,100)i;
INSERT INTO referece_table SELECT i FROM generate_Series(0,100)i; INSERT INTO referece_table SELECT i FROM generate_Series(0,100)i;
INSERT INTO distributed_table SELECT i, i FROM generate_Series(0,100)i; INSERT INTO distributed_table SELECT i, i FROM generate_Series(0,100)i;
@ -154,8 +152,8 @@ alter table local_fkey_table ALTER COLUMN id TYPE int;
SET citus.force_max_query_parallelization TO ON; SET citus.force_max_query_parallelization TO ON;
alter table distributed_table ALTER COLUMN value_1 TYPE bigint; alter table distributed_table ALTER COLUMN value_1 TYPE bigint;
alter table distributed_table ALTER COLUMN value_1 TYPE int; alter table distributed_table ALTER COLUMN value_1 TYPE int;
SET client_min_messages TO error;
SET client_min_messages TO ERROR;
DROP TABLE distributed_table, referece_table, local_fkey_table; DROP TABLE distributed_table, referece_table, local_fkey_table;
SELECT master_remove_node('localhost', :master_port); SELECT master_remove_node('localhost', :master_port);

View File

@ -286,14 +286,14 @@ SELECT verify_metadata('localhost', :worker_1_port),
-- Don't drop the reference table so it has shards on the nodes being disabled -- Don't drop the reference table so it has shards on the nodes being disabled
DROP TABLE dist_table_1, dist_table_2; DROP TABLE dist_table_1, dist_table_2;
SELECT 1 FROM master_disable_node('localhost', :worker_2_port); SELECT pg_catalog.citus_disable_node_and_wait('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port); SELECT verify_metadata('localhost', :worker_1_port);
SELECT 1 FROM master_activate_node('localhost', :worker_2_port); SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port); SELECT verify_metadata('localhost', :worker_1_port);
------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------
-- Test master_disable_node() when the node that is being disabled is actually down -- Test citus_disable_node_and_wait() when the node that is being disabled is actually down
------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------
SELECT master_update_node(:nodeid_2, 'localhost', 1); SELECT master_update_node(:nodeid_2, 'localhost', 1);
SELECT wait_until_metadata_sync(30000); SELECT wait_until_metadata_sync(30000);
@ -301,12 +301,9 @@ SELECT wait_until_metadata_sync(30000);
-- set metadatasynced so we try propagating metadata changes -- set metadatasynced so we try propagating metadata changes
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2);
-- should not error out, master_disable_node is tolerant to node failures -- should not error out, citus_disable_node is tolerant to node failures
SELECT 1 FROM master_disable_node('localhost', 1); -- but we should not wait for metadata syncing to finish as this node is down
SELECT 1 FROM citus_disable_node('localhost', 1, true);
-- try again after stopping metadata sync
SELECT stop_metadata_sync_to_node('localhost', 1);
SELECT 1 FROM master_disable_node('localhost', 1);
SELECT verify_metadata('localhost', :worker_1_port); SELECT verify_metadata('localhost', :worker_1_port);
@ -317,7 +314,7 @@ SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port); SELECT verify_metadata('localhost', :worker_1_port);
------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------
-- Test master_disable_node() when the other node is down -- Test citus_disable_node_and_wait() when the other node is down
------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------
-- node 1 is down. -- node 1 is down.
SELECT master_update_node(:nodeid_1, 'localhost', 1); SELECT master_update_node(:nodeid_1, 'localhost', 1);
@ -326,12 +323,13 @@ SELECT wait_until_metadata_sync(30000);
-- set metadatasynced so we try propagating metadata changes -- set metadatasynced so we try propagating metadata changes
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2); UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2);
-- should error out -- should not error out, citus_disable_node is tolerant to node failures
SELECT 1 FROM master_disable_node('localhost', :worker_2_port); -- but we should not wait for metadata syncing to finish as this node is down
SELECT 1 FROM citus_disable_node('localhost', :worker_2_port);
-- try again after stopping metadata sync -- try again after stopping metadata sync
SELECT stop_metadata_sync_to_node('localhost', 1); SELECT stop_metadata_sync_to_node('localhost', 1);
SELECT 1 FROM master_disable_node('localhost', :worker_2_port); SELECT 1 FROM citus_disable_node_and_wait('localhost', :worker_2_port);
-- bring up node 1 -- bring up node 1
SELECT master_update_node(:nodeid_1, 'localhost', :worker_1_port); SELECT master_update_node(:nodeid_1, 'localhost', :worker_1_port);

View File

@ -48,7 +48,8 @@ SELECT create_reference_table('remove_node_reference_table');
SELECT 1 FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary'); SELECT 1 FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary');
SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group;
-- make sure when we disable a secondary we don't remove any placements -- make sure when we disable a secondary we don't remove any placements
SELECT master_disable_node('localhost', 9001); SELECT citus_disable_node('localhost', 9001);
SELECT public.wait_until_metadata_sync();
SELECT isactive FROM pg_dist_node WHERE nodeport = 9001; SELECT isactive FROM pg_dist_node WHERE nodeport = 9001;
SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group;
-- make sure when we activate a secondary we don't add any placements -- make sure when we activate a secondary we don't add any placements
@ -130,7 +131,8 @@ SELECT master_remove_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-- try to disable the node before removing it (this used to crash) -- try to disable the node before removing it (this used to crash)
SELECT master_disable_node('localhost', :worker_2_port); SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync();
SELECT master_remove_node('localhost', :worker_2_port); SELECT master_remove_node('localhost', :worker_2_port);
-- re-add the node for the next test -- re-add the node for the next test
@ -544,9 +546,9 @@ SET citus.replicate_reference_tables_on_activate TO off;
SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-- test with master_disable_node -- test with citus_disable_node_and_wait
-- status before master_disable_node -- status before citus_disable_node_and_wait
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
SELECT SELECT
@ -579,9 +581,10 @@ ORDER BY shardid ASC;
\c - - - :master_port \c - - - :master_port
SELECT master_disable_node('localhost', :worker_2_port); SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync();
-- status after master_disable_node -- status after citus_disable_node_and_wait
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
SELECT SELECT

View File

@ -19,6 +19,10 @@ SELECT citus_relation_size('non_distributed_table');
SELECT citus_total_relation_size('non_distributed_table'); SELECT citus_total_relation_size('non_distributed_table');
DROP TABLE non_distributed_table; DROP TABLE non_distributed_table;
-- fix placements that were broken via disabling the node
SET client_min_messages TO ERROR;
SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
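Illustrative follow-up (an assumption, not in the diff): one way to confirm that replicate_table_shards repaired the placements would be to count placements per shard, e.g.:
-- hypothetical check: each shard of lineitem_hash_part should have two placements again
SELECT shardid, count(*)
FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid = 'lineitem_hash_part'::regclass
GROUP BY shardid
ORDER BY shardid;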
-- Tests on distributed table with replication factor > 1 -- Tests on distributed table with replication factor > 1
VACUUM (FULL) lineitem_hash_part; VACUUM (FULL) lineitem_hash_part;

View File

@ -134,3 +134,4 @@ BEGIN
END LOOP; END LOOP;
END; END;
$$ LANGUAGE plpgsql; $$ LANGUAGE plpgsql;

View File

@ -54,3 +54,14 @@ CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix
CREATE OR REPLACE FUNCTION top_transaction_context_size() RETURNS BIGINT CREATE OR REPLACE FUNCTION top_transaction_context_size() RETURNS BIGINT
LANGUAGE C STRICT VOLATILE LANGUAGE C STRICT VOLATILE
AS 'citus', $$top_transaction_context_size$$; AS 'citus', $$top_transaction_context_size$$;
CREATE OR REPLACE FUNCTION pg_catalog.citus_disable_node_and_wait(nodename text, nodeport integer, force bool DEFAULT false)
RETURNS void
LANGUAGE plpgsql
AS $function$
BEGIN
PERFORM pg_catalog.citus_disable_node(nodename, nodeport, force);
PERFORM public.wait_until_metadata_sync(30000);
END;
$function$;
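Usage note (illustrative, not part of the diff): this helper simply chains citus_disable_node with the wait_until_metadata_sync test helper, so tests that still need synchronous behavior can call it as a drop-in replacement, for example:
-- assumed example: disable worker 2 and block until metadata sync finishes
SELECT pg_catalog.citus_disable_node_and_wait('localhost', :worker_2_port);
-- the node should now be marked inactive on the coordinator
SELECT isactive FROM pg_dist_node WHERE nodeport = :worker_2_port;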

View File

@ -0,0 +1,69 @@
CREATE SCHEMA disable_node_with_replicated_tables;
SET search_path TO disable_node_with_replicated_tables;
SET citus.next_shard_id TO 101500;
SET citus.shard_replication_factor TO 2;
CREATE TABLE replicated(a int, b int);
SELECT create_distributed_table('replicated', 'a', shard_count:=2);
CREATE TABLE ref (a int, b int);
SELECT create_reference_table('ref');
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- should successfully disable the node
SELECT citus_disable_node('localhost', :worker_2_port, true);
SELECT public.wait_until_metadata_sync();
-- the placement should be removed both from the coordinator
-- and from the workers
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);
\c - - - :worker_1_port
SELECT count(*) FROM pg_dist_placement WHERE shardid IN (101500, 101501, 101502);
SET search_path TO disable_node_with_replicated_tables;
-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
\c - - - :master_port
SET search_path TO disable_node_with_replicated_tables;
-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- now, query with round-robin policy such that
-- each query should hit different replicas
SET citus.task_assignment_policy to "round-robin";
SELECT count(*) FROM ref;
SELECT count(*) FROM ref;
SELECT count(*) FROM replicated;
SELECT count(*) FROM replicated;
-- now, we should be able to replicate the shards back
SET client_min_messages TO ERROR;
SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
SELECT 1 FROM replicate_table_shards('replicated', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
RESET client_min_messages;
-- should be able to ingest data from both the worker and the coordinator
INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i;
INSERT INTO ref SELECT i,i FROM generate_series(0,10)i;
-- now, query with round-robin policy such that
-- each query should hit different replicas
SET citus.task_assignment_policy to "round-robin";
SELECT count(*) FROM ref;
SELECT count(*) FROM ref;
SELECT count(*) FROM replicated;
SELECT count(*) FROM replicated;
SET client_min_messages TO ERROR;
DROP SCHEMA disable_node_with_replicated_tables CASCADE;

View File

@ -550,8 +550,9 @@ CREATE TABLE test_schema_support.nation_hash (
n_comment varchar(152) n_comment varchar(152)
); );
SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash'); SET citus.shard_count TO 4;
SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 1); SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash');
CREATE TABLE test_schema_support.nation_hash2 ( CREATE TABLE test_schema_support.nation_hash2 (
n_nationkey integer not null, n_nationkey integer not null,
@ -560,21 +561,24 @@ CREATE TABLE test_schema_support.nation_hash2 (
n_comment varchar(152) n_comment varchar(152)
); );
SELECT master_create_distributed_table('test_schema_support.nation_hash2', 'n_nationkey', 'hash'); SELECT create_distributed_table('test_schema_support.nation_hash2', 'n_nationkey', 'hash');
SELECT master_create_worker_shards('test_schema_support.nation_hash2', 4, 1);
-- Mark tables as coordinator replicated in order to be able to test replicate_table_shards
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('test_schema_support.nation_hash2'::regclass, 'test_schema_support.nation_hash'::regclass);
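A small sanity-check sketch (assumed, not part of the diff) for the repmodel update above:
-- hypothetical: both tables should now report the coordinator-replicated ('c') model
SELECT logicalrelid, repmodel FROM pg_dist_partition
WHERE logicalrelid IN ('test_schema_support.nation_hash'::regclass,
                       'test_schema_support.nation_hash2'::regclass);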
-- Shard count before replication -- Shard count before replication
SELECT COUNT(*) FROM pg_dist_shard_placement; SELECT COUNT(*) FROM pg_dist_shard_placement;
SET search_path TO public; SET search_path TO public;
SELECT replicate_table_shards('test_schema_support.nation_hash', shard_transfer_mode:='block_writes'); SELECT replicate_table_shards('test_schema_support.nation_hash', shard_replication_factor:=2, max_shard_copies:=1, shard_transfer_mode:='block_writes');
-- Confirm replication -- Confirm replication, both tables replicated due to colocation
SELECT COUNT(*) FROM pg_dist_shard_placement; SELECT COUNT(*) FROM pg_dist_shard_placement;
-- Test with search_path set -- Test with search_path set
SET search_path TO test_schema_support; SET search_path TO test_schema_support;
SELECT replicate_table_shards('nation_hash2', shard_transfer_mode:='block_writes'); SELECT replicate_table_shards('nation_hash2', shard_replication_factor:=2, shard_transfer_mode:='block_writes');
-- Confirm replication -- Confirm replication
SELECT COUNT(*) FROM pg_dist_shard_placement; SELECT COUNT(*) FROM pg_dist_shard_placement;
@ -1229,7 +1233,7 @@ DROP TABLE tab;
-- we don't need the coordinator on pg_dist_node anymore -- we don't need the coordinator on pg_dist_node anymore
SELECT 1 FROM master_remove_node('localhost', :master_port); SELECT 1 FROM master_remove_node('localhost', :master_port);
SELECT public.wait_until_metadata_sync(30000);
-- --
-- Make sure that rebalance_table_shards() and replicate_table_shards() replicate -- Make sure that rebalance_table_shards() and replicate_table_shards() replicate
@ -1265,6 +1269,7 @@ SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shar
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass; SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
SELECT 1 FROM master_remove_node('localhost', :master_port); SELECT 1 FROM master_remove_node('localhost', :master_port);
SELECT public.wait_until_metadata_sync(30000);
CREATE TABLE rebalance_test_table(int_column int); CREATE TABLE rebalance_test_table(int_column int);
SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append'); SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append');
@ -1283,6 +1288,8 @@ SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE lo
DROP TABLE dist_table_test_3, rebalance_test_table, ref_table; DROP TABLE dist_table_test_3, rebalance_test_table, ref_table;
SELECT 1 FROM master_remove_node('localhost', :master_port); SELECT 1 FROM master_remove_node('localhost', :master_port);
SELECT public.wait_until_metadata_sync(30000);
-- reference table 2 will not have a replica identity, causing the rebalancer to not work -- reference table 2 will not have a replica identity, causing the rebalancer to not work
-- when run in the default mode. Instead we need to change the shard transfer mode to make -- when run in the default mode. Instead we need to change the shard transfer mode to make
@ -1297,6 +1304,7 @@ CREATE TABLE r2 (a int, b int);
-- node without the reference tables -- node without the reference tables
SELECT 1 from master_remove_node('localhost', :worker_2_port); SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync(30000);
SELECT create_distributed_table('t1','a'); SELECT create_distributed_table('t1','a');
SELECT create_reference_table('r1'); SELECT create_reference_table('r1');
@ -1321,6 +1329,7 @@ SELECT count(*) FROM pg_dist_partition;
-- executing the rebalancer -- executing the rebalancer
SELECT 1 from master_remove_node('localhost', :worker_2_port); SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync(30000);
CREATE TABLE r1 (a int PRIMARY KEY, b int); CREATE TABLE r1 (a int PRIMARY KEY, b int);
SELECT create_reference_table('r1'); SELECT create_reference_table('r1');
@ -1353,6 +1362,7 @@ DROP TABLE r1;
-- fail. -- fail.
SELECT 1 from master_remove_node('localhost', :worker_2_port); SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync(30000);
CREATE TABLE t1 (a int PRIMARY KEY, b int); CREATE TABLE t1 (a int PRIMARY KEY, b int);
CREATE TABLE r1 (a int PRIMARY KEY, b int); CREATE TABLE r1 (a int PRIMARY KEY, b int);
@ -1382,3 +1392,4 @@ JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'r1'::regclass; WHERE logicalrelid = 'r1'::regclass;
DROP TABLE t1, r1; DROP TABLE t1, r1;

View File

@ -15,7 +15,7 @@ SET client_min_messages TO WARNING;
SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
-- coordinator cannot be disabled -- coordinator cannot be disabled
SELECT 1 FROM master_disable_node('localhost', :master_port); SELECT 1 FROM citus_disable_node('localhost', :master_port);
RESET client_min_messages; RESET client_min_messages;

View File

@ -180,21 +180,6 @@ SELECT hasmetadata, metadatasynced, shouldhaveshards FROM pg_dist_node WHERE nod
\c - - - :worker_2_port \c - - - :worker_2_port
SELECT hasmetadata, metadatasynced, shouldhaveshards FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport; SELECT hasmetadata, metadatasynced, shouldhaveshards FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;
\c - - - :master_port
-- verify that mx workers are updated when disabling/activating nodes
SELECT citus_disable_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
\c - - - :worker_2_port
SELECT nodeport, isactive FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;
\c - - - :master_port
SET client_min_messages TO ERROR;
SELECT citus_activate_node('localhost', :worker_1_port);
\c - - - :worker_2_port
SELECT nodeport, isactive FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port) ORDER BY nodeport;
\c - - - :master_port \c - - - :master_port
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);