Nonblocking tenant isolation is supported by using the split API. (#6167)

pull/6179/head
aykut-bozkurt 2022-08-17 11:13:07 +03:00 committed by GitHub
parent 78a5013e24
commit be06d65721
46 changed files with 4042 additions and 174 deletions


@@ -27,8 +27,6 @@
 /* declarations for dynamic loading */
 PG_FUNCTION_INFO_V1(citus_split_shard_by_split_points);
-static SplitMode LookupSplitMode(Oid shardTransferModeOid);
-
 /*
  * citus_split_shard_by_split_points(shard_id bigint, split_points text[], node_ids integer[], shard_transfer_mode citus.shard_transfer_mode)
  * Split source shard into multiple shards using the given split points.
@@ -81,13 +79,14 @@ LookupSplitMode(Oid shardTransferModeOid)
 	{
 		shardSplitMode = BLOCKING_SPLIT;
 	}
-	/* TODO(saawaek): Handle this appropriately based on replica identity */
-	else if (strncmp(enumLabel, "auto", NAMEDATALEN) == 0 ||
-			 strncmp(enumLabel, "force_logical", NAMEDATALEN) == 0)
+	else if (strncmp(enumLabel, "force_logical", NAMEDATALEN) == 0)
 	{
 		shardSplitMode = NON_BLOCKING_SPLIT;
 	}
+	else if (strncmp(enumLabel, "auto", NAMEDATALEN) == 0)
+	{
+		shardSplitMode = AUTO_SPLIT;
+	}
 	else
 	{
 		/* We will not get here as postgres will validate the enum value. */

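For context: LookupSplitMode translates the user-facing citus.shard_transfer_mode argument, so 'auto' now selects the new AUTO_SPLIT mode instead of being folded into 'force_logical'. A hypothetical call exercising the new branch (shard id and node ids are made up):

-- 'auto' maps to AUTO_SPLIT: nonblocking, but only after every colocated
-- table is verified to have a replica identity (see SplitShard below).
SELECT pg_catalog.citus_split_shard_by_split_points(
    102008,                        -- shard to split (hypothetical)
    ARRAY['0'],                    -- split points
    ARRAY[1, 2],                   -- target node ids (hypothetical)
    'auto');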

@@ -41,7 +41,6 @@
 #include "utils/lsyscache.h"
 #include "utils/typcache.h"
-
 /* declarations for dynamic loading */
 PG_FUNCTION_INFO_V1(isolate_tenant_to_new_shard);
 PG_FUNCTION_INFO_V1(worker_hash);
@@ -60,6 +59,7 @@ isolate_tenant_to_new_shard(PG_FUNCTION_ARGS)
 	Oid relationId = PG_GETARG_OID(0);
 	Datum inputDatum = PG_GETARG_DATUM(1);
 	text *cascadeOptionText = PG_GETARG_TEXT_P(2);
+	Oid shardTransferModeOid = PG_GETARG_OID(3);

 	EnsureTableOwner(relationId);
@@ -163,7 +163,8 @@ isolate_tenant_to_new_shard(PG_FUNCTION_ARGS)
 		nodeIdsForPlacementList = lappend_int(nodeIdsForPlacementList, sourceNodeId);
 	}

-	SplitShard(BLOCKING_SPLIT,
+	SplitMode splitMode = LookupSplitMode(shardTransferModeOid);
+	SplitShard(splitMode,
 			   ISOLATE_TENANT_TO_NEW_SHARD,
 			   sourceShard->shardId,
 			   shardSplitPointsList,

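isolate_tenant_to_new_shard now reads a fourth argument and routes it through LookupSplitMode, so tenant isolation accepts the same transfer modes as shard splits and moves. A hypothetical invocation (table name and tenant value are made up):

SELECT isolate_tenant_to_new_shard('orders', 42, 'CASCADE',
                                   shard_transfer_mode => 'force_logical');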

@@ -70,7 +70,6 @@ typedef struct ShardCommandList
 } ShardCommandList;

 /* local function forward declarations */
-static void VerifyTablesHaveReplicaIdentity(List *colocatedTableList);
 static bool RelationCanPublishAllModifications(Oid relationId);
 static bool CanUseLogicalReplication(Oid relationId, char shardReplicationMode);
 static void ErrorIfTableCannotBeReplicated(Oid relationId);
@@ -591,7 +590,7 @@ ErrorIfMoveUnsupportedTableType(Oid relationId)
  * do not have a replica identity, which is required for logical replication
  * to replicate UPDATE and DELETE commands.
  */
-static void
+void
 VerifyTablesHaveReplicaIdentity(List *colocatedTableList)
 {
 	ListCell *colocatedTableCell = NULL;

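VerifyTablesHaveReplicaIdentity loses its static qualifier so that the shard split code can reuse the check (see the repair_shards.h change below). For reference, a table passes the check when it has a primary key or an explicit replica identity; a sketch with a hypothetical table and index:

-- Either of these satisfies the check behind the 'auto' mode:
ALTER TABLE measurements ADD PRIMARY KEY (measureid);
-- or, given an existing unique index on the table:
ALTER TABLE measurements REPLICA IDENTITY USING INDEX measurements_key_idx;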

@@ -29,6 +29,7 @@
 #include "distributed/remote_commands.h"
 #include "distributed/shard_split.h"
 #include "distributed/reference_table_utils.h"
+#include "distributed/repair_shards.h"
 #include "distributed/multi_partitioning_utils.h"
 #include "distributed/worker_manager.h"
 #include "distributed/worker_transaction.h"
@@ -64,6 +65,8 @@ typedef struct GroupedDummyShards
 } GroupedDummyShards;

 /* Function declarations */
+static void ErrorIfCannotSplitShard(SplitOperation splitOperation,
+                                    ShardInterval *sourceShard);
 static void ErrorIfCannotSplitShardExtended(SplitOperation splitOperation,
                                             ShardInterval *shardIntervalToSplit,
                                             List *shardSplitPointsList,
@@ -136,7 +139,6 @@ static void DropDummyShards(HTAB *mapOfDummyShardToPlacement);
 static void DropDummyShard(MultiConnection *connection, ShardInterval *shardInterval);
 static uint64 GetNextShardIdForSplitChild(void);
-
 /* Customize error message strings based on operation type */
 static const char *const SplitOperationName[] =
 {
@@ -155,7 +157,7 @@ static const char *const SplitTargetName[] =
  * ErrorIfCannotSplitShard checks relation kind and invalid shards. It errors
  * out if we are not able to split the given shard.
  */
-void
+static void
 ErrorIfCannotSplitShard(SplitOperation splitOperation, ShardInterval *sourceShard)
 {
 	Oid relationId = sourceShard->relationId;
@@ -372,10 +374,22 @@ SplitShard(SplitMode splitMode,
 						SplitOperationName[splitOperation],
 						SplitTargetName[splitOperation])));
 	}
+	else if (PlacementMovedUsingLogicalReplicationInTX)
+	{
+		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						errmsg("multiple shard movements/splits via logical "
+							   "replication in the same transaction is currently "
+							   "not supported")));
+	}

 	ShardInterval *shardIntervalToSplit = LoadShardInterval(shardIdToSplit);
 	List *colocatedTableList = ColocatedTableList(shardIntervalToSplit->relationId);

+	if (splitMode == AUTO_SPLIT)
+	{
+		VerifyTablesHaveReplicaIdentity(colocatedTableList);
+	}
+
 	Oid relationId = RelationIdForShard(shardIdToSplit);
 	AcquirePlacementColocationLock(relationId, ExclusiveLock, "split");
@@ -433,6 +447,8 @@ SplitShard(SplitMode splitMode,
 							shardIntervalToSplit,
 							shardSplitPointsList,
 							workersForPlacementList);
+
+		PlacementMovedUsingLogicalReplicationInTX = true;
 	}
 }
@@ -1385,10 +1401,6 @@ NonBlockingShardSplit(SplitOperation splitOperation,
 		shardGroupSplitIntervalListList, workersForPlacementList,
 		snapshot);

-	/* Used for testing */
-	ConflictOnlyWithIsolationTesting();
-
 	/*
 	 * 9) Create replica identities, this needs to be done before enabling
 	 * the subscriptions.
@@ -1414,6 +1426,9 @@ NonBlockingShardSplit(SplitOperation splitOperation,
 	/* 14) Wait for subscribers to catchup till source LSN */
 	WaitForAllSubscriptionsToCatchUp(sourceConnection, groupedLogicalRepTargetsHash);

+	/* Used for testing */
+	ConflictOnlyWithIsolationTesting();
+
 	/* 15) Block writes on source shards */
 	BlockWritesToShardList(sourceColocatedShardIntervalList);

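Two behavioral notes on the SplitShard changes above: AUTO_SPLIT runs the replica-identity check up front and otherwise behaves like NON_BLOCKING_SPLIT, and the new PlacementMovedUsingLogicalReplicationInTX flag limits each transaction to one logical-replication move or split. A sketch of the restriction (shard ids and arguments are hypothetical):

BEGIN;
SELECT citus_split_shard_by_split_points(102008, ARRAY['0'],
                                         ARRAY[1, 2], 'force_logical');
-- A second nonblocking split (or shard move) in the same transaction
-- now fails:
SELECT citus_split_shard_by_split_points(102009, ARRAY['100'],
                                         ARRAY[1, 2], 'force_logical');
-- ERROR: multiple shard movements/splits via logical replication in the
-- same transaction is currently not supported
ROLLBACK;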

@@ -73,3 +73,6 @@ DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_
 #include "udfs/worker_split_shard_replication_setup/11.1-1.sql"
 #include "udfs/citus_isolation_test_session_is_blocked/11.1-1.sql"
 #include "udfs/replicate_reference_tables/11.1-1.sql"
+
+DROP FUNCTION pg_catalog.isolate_tenant_to_new_shard(table_name regclass, tenant_id "any", cascade_option text);
+#include "udfs/isolate_tenant_to_new_shard/11.1-1.sql"


@@ -92,3 +92,6 @@ DROP FUNCTION pg_catalog.citus_locks();
 DROP FUNCTION pg_catalog.replicate_reference_tables(citus.shard_transfer_mode);
 #include "../udfs/replicate_reference_tables/9.3-2.sql"
+
+DROP FUNCTION pg_catalog.isolate_tenant_to_new_shard(table_name regclass, tenant_id "any", cascade_option text, shard_transfer_mode citus.shard_transfer_mode);
+#include "../udfs/isolate_tenant_to_new_shard/8.0-1.sql"


@@ -0,0 +1,14 @@
CREATE FUNCTION pg_catalog.isolate_tenant_to_new_shard(
table_name regclass,
tenant_id "any",
cascade_option text DEFAULT '',
shard_transfer_mode citus.shard_transfer_mode DEFAULT 'auto')
RETURNS bigint LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$isolate_tenant_to_new_shard$$;
COMMENT ON FUNCTION pg_catalog.isolate_tenant_to_new_shard(
table_name regclass,
tenant_id "any",
cascade_option text,
shard_transfer_mode citus.shard_transfer_mode)
IS 'isolate a tenant to its own shard and return the new shard id';


@@ -0,0 +1,7 @@
CREATE FUNCTION pg_catalog.isolate_tenant_to_new_shard(table_name regclass, tenant_id "any", cascade_option text DEFAULT '')
RETURNS bigint
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$isolate_tenant_to_new_shard$$;
COMMENT ON FUNCTION pg_catalog.isolate_tenant_to_new_shard(table_name regclass, tenant_id "any", cascade_option text)
IS 'isolate a tenant to its own shard and return the new shard id';


@@ -0,0 +1,14 @@
CREATE FUNCTION pg_catalog.isolate_tenant_to_new_shard(
table_name regclass,
tenant_id "any",
cascade_option text DEFAULT '',
shard_transfer_mode citus.shard_transfer_mode DEFAULT 'auto')
RETURNS bigint LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$isolate_tenant_to_new_shard$$;
COMMENT ON FUNCTION pg_catalog.isolate_tenant_to_new_shard(
table_name regclass,
tenant_id "any",
cascade_option text,
shard_transfer_mode citus.shard_transfer_mode)
IS 'isolate a tenant to its own shard and return the new shard id';

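Because the new signature defaults shard_transfer_mode to 'auto', a bare call now attempts a nonblocking isolation rather than a blocking one. A hypothetical example (table and tenant value made up):

-- Uses the default 'auto' mode: logical replication when every colocated
-- table has a replica identity, an error otherwise.
SELECT isolate_tenant_to_new_shard('orders', 42);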

@@ -16,3 +16,4 @@ extern uint64 ShardListSizeInBytes(List *colocatedShardList,
 extern void ErrorIfMoveUnsupportedTableType(Oid relationId);
 extern void CopyShardsToNode(WorkerNode *sourceNode, WorkerNode *targetNode,
                              List *shardIntervalList, char *snapshotName);
+extern void VerifyTablesHaveReplicaIdentity(List *colocatedTableList);


@@ -16,9 +16,11 @@
 typedef enum SplitMode
 {
 	BLOCKING_SPLIT = 0,
-	NON_BLOCKING_SPLIT = 1
+	NON_BLOCKING_SPLIT = 1,
+	AUTO_SPLIT = 2
 } SplitMode;

 /*
  * User Scenario calling Split Shard API.
  * The 'SplitOperation' type is used to customize info/error messages based on user scenario.
@@ -40,9 +42,8 @@ extern void SplitShard(SplitMode splitMode,
 				   List *shardSplitPointsList,
 				   List *nodeIdsForPlacementList);

-/* TODO(niupre): Make all these APIs private when all consumers (Example : ISOLATE_TENANT_TO_NEW_SHARD) directly call 'SplitShard' API. */
-extern void ErrorIfCannotSplitShard(SplitOperation splitOperation,
-									ShardInterval *sourceShard);
 extern void DropShardList(List *shardIntervalList);
+extern SplitMode LookupSplitMode(Oid shardTransferModeOid);

 #endif /* SHARDSPLIT_H_ */

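The header now exposes LookupSplitMode, completing the one-to-one mapping from the user-facing enum onto SplitMode: 'block_writes' maps to BLOCKING_SPLIT, 'force_logical' to NON_BLOCKING_SPLIT, and 'auto' to AUTO_SPLIT. If needed, the accepted labels can be listed with standard PostgreSQL enum introspection (a quick sanity check, not part of this change; label order is assumed):

SELECT unnest(enum_range(NULL::citus.shard_transfer_mode));
--  auto
--  force_logical
--  block_writes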

@@ -9,4 +9,5 @@ test: failure_online_move_shard_placement
 test: failure_on_create_subscription
 test: failure_offline_move_shard_placement
 test: failure_tenant_isolation
+test: failure_tenant_isolation_nonblocking
 test: check_mx


@@ -13,5 +13,6 @@ test: isolation_ref2ref_foreign_keys_enterprise
 test: isolation_pg_send_cancellation
 test: isolation_shard_move_vs_start_metadata_sync
 test: isolation_tenant_isolation
+test: isolation_tenant_isolation_nonblocking
 test: isolation_blocking_shard_split
 test: isolation_blocking_shard_split_with_fkey_to_reference


@@ -7,6 +7,7 @@ test: multi_cluster_management
 test: multi_test_catalog_views
 test: multi_data_types
 test: multi_tenant_isolation
+test: multi_tenant_isolation_nonblocking
 # --------
 # Tests that require adding coordinator to pg_dist_node


@@ -635,7 +635,7 @@ SELECT create_distributed_table ('shard_split_table', 'a');
 (1 row)

-SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5);
+SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5, shard_transfer_mode => 'block_writes');
  ?column?
 ---------------------------------------------------------------------
         1


@@ -22,7 +22,7 @@ SELECT citus_add_local_table_to_metadata('citus_local_table');
 (1 row)

 -- isolate_tenant_to_new_shard is not supported
-SELECT isolate_tenant_to_new_shard('citus_local_table', 100);
+SELECT isolate_tenant_to_new_shard('citus_local_table', 100, shard_transfer_mode => 'block_writes');
 ERROR: cannot isolate tenant because tenant isolation is only support for hash distributed tables
 -- master_copy_shard_placement is not supported
 SELECT master_copy_shard_placement(shardid, 'localhost', :master_port, 'localhost', :worker_1_port, false)


@@ -505,7 +505,7 @@ SET citus.shard_replication_factor TO 1;
     8999002, -- sensors_old
     ARRAY['-2127770000'],
     ARRAY[:worker_1_node, :worker_2_node],
-    'auto');
+    'force_logical');
  citus_split_shard_by_split_points
 ---------------------------------------------------------------------


@@ -9,6 +9,10 @@ Here is a high level overview of test plan:
 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
 7. Move one of the split shard to test Split -> ShardMove.
 8. Split an already split shard second time on a different schema.
+9. Create a colocated table with no replica identity.
+10. Show we do not allow Split with the shard transfer mode 'auto' if any colocated table has no replica identity.
+11. Drop the colocated table with no replica identity.
+12. Show we allow Split with the shard transfer mode 'auto' if all colocated tables have a replica identity.
 */
 CREATE SCHEMA "citus_split_test_schema";
 CREATE ROLE test_shard_split_role WITH LOGIN;
@@ -428,6 +432,103 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node
 (18 rows)

 -- END: Split second time on another schema
+-- BEGIN: Create a co-located table with no replica identity.
+CREATE TABLE table_no_rep_id (measureid integer);
+SELECT create_distributed_table('table_no_rep_id', 'measureid', colocate_with:='sensors');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- END: Create a co-located table with no replica identity.
+-- BEGIN: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity
+SET citus.next_shard_id TO 8981041;
+SELECT pg_catalog.citus_split_shard_by_split_points(
+    8981031,
+    ARRAY['-2120000000'],
+    ARRAY[:worker_1_node, :worker_2_node],
+    'auto');
+ERROR: cannot use logical replication to transfer shards of the relation table_no_rep_id since it doesn't have a REPLICA IDENTITY or PRIMARY KEY
+DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY.
+HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'.
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
+FROM pg_dist_shard AS shard
+INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
+INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
+INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
+WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
+ORDER BY logicalrelid, shardminvalue::BIGINT;
+ shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
+---------------------------------------------------------------------
+ 8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637
+ 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638
+ 8981008 | sensors | -1073741823 | -1 | localhost | 57638
+ 8981013 | sensors | 0 | 536870911 | localhost | 57637
+ 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
+ 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
+ 8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637
+ 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638
+ 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
+ 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
+ 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
+ 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
+ 8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637
+ 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638
+ 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
+ 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
+ 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
+ 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
+(18 rows)
+
+-- END: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity
+-- BEGIN: Drop the co-located table with no replica identity.
+DROP TABLE table_no_rep_id;
+-- END: Drop the co-located table with no replica identity.
+-- BEGIN: Split a shard with shard_transfer_mode='auto' and with all colocated tables having replica identity
+SET citus.next_shard_id TO 8981041;
+SELECT pg_catalog.citus_split_shard_by_split_points(
+    8981031,
+    ARRAY['-2120000000'],
+    ARRAY[:worker_1_node, :worker_2_node],
+    'auto');
+ citus_split_shard_by_split_points
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
+FROM pg_dist_shard AS shard
+INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
+INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
+INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
+WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
+ORDER BY logicalrelid, shardminvalue::BIGINT;
+ shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
+---------------------------------------------------------------------
+ 8981041 | sensors | -2147483648 | -2120000000 | localhost | 57637
+ 8981042 | sensors | -2119999999 | -2100000000 | localhost | 57638
+ 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638
+ 8981008 | sensors | -1073741823 | -1 | localhost | 57638
+ 8981013 | sensors | 0 | 536870911 | localhost | 57637
+ 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
+ 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
+ 8981043 | colocated_dist_table | -2147483648 | -2120000000 | localhost | 57637
+ 8981044 | colocated_dist_table | -2119999999 | -2100000000 | localhost | 57638
+ 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638
+ 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
+ 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
+ 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
+ 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
+ 8981045 | table_with_index_rep_identity | -2147483648 | -2120000000 | localhost | 57637
+ 8981046 | table_with_index_rep_identity | -2119999999 | -2100000000 | localhost | 57638
+ 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638
+ 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
+ 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
+ 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
+ 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
+(21 rows)
+
+-- END: Split a shard with shard_transfer_mode='auto' and with all colocated tables having replica identity
 -- BEGIN: Validate Data Count
 SELECT COUNT(*) FROM sensors;
  count

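The failing and succeeding runs above differ only in whether every colocated table carries a replica identity. A quick way to audit that before choosing 'auto' (a sketch; in pg_class.relreplident, 'd' means default, i.e. the primary key if one exists, 'n' means none, 'i' an index, 'f' full):

SELECT relname, relreplident
FROM pg_class
WHERE relname IN ('sensors', 'colocated_dist_table', 'table_no_rep_id');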

@@ -101,7 +101,7 @@ FROM shard_ranges;
 ERROR: Resulting shard count '64001' with split is greater than max shard count '64000' limit.
 -- UDF fails where source shard cannot be split further i.e min and max range is equal.
 -- Create a Shard where range cannot be split further
-SELECT isolate_tenant_to_new_shard('table_to_split', 1);
+SELECT isolate_tenant_to_new_shard('table_to_split', 1, shard_transfer_mode => 'block_writes');
  isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     49761305


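The rest of this file is mechanical: every isolate_tenant_to_new_shard call in these blocking failure tests gains an explicit shard_transfer_mode => 'block_writes'. With the default now 'auto', pinning the mode keeps the tests on the original blocking code path:

-- Before (implicitly blocking):
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
-- After (blocking behavior must be requested explicitly):
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE',
                                   shard_transfer_mode => 'block_writes');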
@@ -56,7 +56,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 -- cancellation on colocated table creation
@@ -66,7 +66,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on colocated table population
 SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").kill()');
@@ -75,7 +75,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").kill()');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
 -- cancellation on colocated table population
 SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").cancel(' || :pid || ')');
@@ -84,7 +84,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").cancel(' ||
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on colocated table constraints
 SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(1).kill()');
@@ -93,7 +93,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
 -- cancellation on colocated table constraints
 SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(2).cancel(' || :pid || ')');
@@ -102,7 +102,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on table creation
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").kill()');
@@ -111,7 +111,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 -- cancellation on table creation
@@ -121,7 +121,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on table population
 SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").kill()');
@@ -130,7 +130,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").kill()');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
 -- cancellation on table population
 SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").cancel(' || :pid || ')');
@@ -139,7 +139,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").cancel(' ||
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on table constraints
 SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(1).kill()');
@@ -148,7 +148,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
 -- cancellation on table constraints
 SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')');
@@ -157,7 +157,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on dropping old colocated shard
 SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()');
@@ -166,7 +166,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolatio
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 -- cancellation on dropping old colocated shard
@@ -176,7 +176,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolatio
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on dropping old shard
 SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
@@ -185,7 +185,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolatio
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 -- cancellation on dropping old shard
@@ -195,7 +195,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolatio
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on foreign key creation
 SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
@@ -204,7 +204,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey F
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 -- failure on foreign key creation
@@ -214,7 +214,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey F
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on shard split transaction
 SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").kill()');
@@ -223,7 +223,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").kill()');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: failure on connection marked as essential: localhost:xxxxx
 -- failure on shard split transaction
 SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").cancel(' || :pid || ')');
@@ -232,7 +232,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").cancel(' || :pid || ')');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on shard split transaction commit
 SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").kill()');
@@ -241,7 +241,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").kill()');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: failure on connection marked as essential: localhost:xxxxx
 -- failure on shard split transaction commit
 SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").cancel(' || :pid || ')');
@@ -250,7 +250,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").cancel(' || :pid || ')');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on transaction for dropping old tables
 SELECT citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").kill()');
@@ -259,7 +259,7 @@ SELECT citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").kill()');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: failure on connection marked as essential: localhost:xxxxx
 -- failure on transaction for dropping old tables
 SELECT citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").cancel(' || :pid || ')');
@@ -268,7 +268,7 @@ SELECT citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").cancel(' || :pid ||
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on transaction for foreign key creation
 SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").kill()');
@@ -277,7 +277,7 @@ SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").kill()');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: failure on connection marked as essential: localhost:xxxxx
 -- failure on transaction for foreign key creation
 SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").cancel(' || :pid || ')');
@@ -286,7 +286,7 @@ SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").cancel(' || :pid ||
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on transaction commit for foreign key creation
 SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").kill()');
@@ -295,7 +295,7 @@ SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").kill()');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: failure on connection marked as essential: localhost:xxxxx
 -- failure on transaction commit for foreign key creation
 SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").cancel(' || :pid || ')');
@@ -304,7 +304,7 @@ SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").cancel(' || :pid |
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on transaction prepare for dropping old tables
 SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
@@ -319,7 +319,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
 DO LANGUAGE plpgsql
 $$
 BEGIN
-	SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+	SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 EXCEPTION WHEN OTHERS THEN
 	RAISE 'Command failed to execute';
 END;
@@ -333,7 +333,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || :p
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- failure on transaction commit for dropping old tables
 SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").kill()');
@@ -342,7 +342,7 @@ SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").kill()');
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: failure on connection marked as essential: localhost:xxxxx
 -- failure on transaction commit for dropping old tables
 SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").cancel(' || :pid || ')');
@@ -351,7 +351,7 @@ SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").cancel(' || :pid |
 (1 row)

-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: canceling statement due to user request
 -- verify that the tenant is not isolated
 SELECT * FROM shard_sizes ORDER BY 1;
@@ -371,7 +371,7 @@ SELECT citus.mitmproxy('conn.allow()');
 -- shard sizes after successful tenant isolation
 CREATE TABLE old_shards AS SELECT shardid FROM pg_dist_shard;
 WITH new_shard AS (
-	SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE') AS shardid
+	SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes') AS shardid
 )
 SELECT row_count
 FROM shard_sizes


@@ -0,0 +1,495 @@
--
-- failure_tenant_isolation
--
-- due to different libpq versions
-- some warning messages differ
-- between local and CI
SET client_min_messages TO ERROR;
CREATE SCHEMA IF NOT EXISTS tenant_isolation;
SET SEARCH_PATH = tenant_isolation;
SET citus.shard_count TO 2;
SET citus.next_shard_id TO 300;
SET citus.shard_replication_factor TO 1;
SET citus.max_adaptive_executor_pool_size TO 1;
SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
CREATE TABLE table_1 (id int PRIMARY KEY);
CREATE TABLE table_2 (ref_id int REFERENCES table_1(id) UNIQUE, data int);
SELECT create_distributed_table('table_1', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('table_2', 'ref_id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE VIEW shard_sizes AS
SELECT shardid, result AS row_count
FROM run_command_on_placements('table_1', 'SELECT count(*) FROM %s');
INSERT INTO table_1
SELECT x
FROM generate_series(1, 100) AS f (x);
INSERT INTO table_2
SELECT x, x
FROM generate_series(1, 100) AS f (x);
-- initial shard sizes
SELECT * FROM shard_sizes ORDER BY 1;
shardid | row_count
---------------------------------------------------------------------
300 | 49
301 | 51
(2 rows)
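From here the new test walks the nonblocking isolation pipeline step by step (replica identity setup, publication, replication slot, snapshot, shard copy, subscription, LSN catch-up, teardown), killing or cancelling the connection at each distinctive query. Every injection follows the same citus.mitmproxy pattern used throughout the file:

-- Kill the connection on the next matching query:
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION").kill()');
-- Or cancel the coordinator backend instead (:pid was captured above):
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION").cancel(' || :pid || ')');
-- Reset the proxy to pass-through:
SELECT citus.mitmproxy('conn.allow()');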
-- failure on table creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on table creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on colocated table creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on colocated table creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on table constraints on replica identity creation
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on table constraints on replica identity creation
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on publication creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on publication creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on replication slot creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE_REPLICATION_SLOT").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on replication slot creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE_REPLICATION_SLOT").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on setting snapshot
SELECT citus.mitmproxy('conn.onQuery(query="SET TRANSACTION SNAPSHOT").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- cancellation on setting snapshot
SELECT citus.mitmproxy('conn.onQuery(query="SET TRANSACTION SNAPSHOT").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on table population
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- cancellation on table population
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on colocated table population
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- cancellation on colocated table population
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on replication setup udf
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_shard_replication_setup").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: Failed to run worker_split_shard_replication_setup UDF. It should successfully execute for splitting a shard in a non-blocking way. Please retry.
-- cancellation on replication setup udf
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_shard_replication_setup").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on subscription creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on subscription creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on colocated table constraints
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(1).kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- cancellation on colocated table constraints
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(2).cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on catching up LSN
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_current_wal_lsn").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on catching up LSN
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_current_wal_lsn").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on dropping subscription
SELECT citus.mitmproxy('conn.onQuery(query="DROP SUBSCRIPTION").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on dropping subscription
SELECT citus.mitmproxy('conn.onQuery(query="DROP SUBSCRIPTION").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on dropping publication
SELECT citus.mitmproxy('conn.onQuery(query="DROP PUBLICATION").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on dropping publication
SELECT citus.mitmproxy('conn.onQuery(query="DROP PUBLICATION").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on dropping replication slot
SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on dropping replication slot
SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on foreign key creation
SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on foreign key creation
SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").after(2).cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on shard split transaction
SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: failure on connection marked as essential: localhost:xxxxx
-- cancellation on shard split transaction
SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on shard split transaction commit
SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: failure on connection marked as essential: localhost:xxxxx
-- cancellation on shard split transaction commit
SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on transaction prepare for dropping old tables
SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
-- due to libpq version differences, the output might change
-- hence use a code block to catch the error
\set VERBOSITY terse
DO LANGUAGE plpgsql
$$
BEGIN
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
EXCEPTION WHEN OTHERS THEN
RAISE 'Command failed to execute';
END;
$$;
ERROR: Command failed to execute
\set VERBOSITY default
-- cancellation on transaction prepare for dropping old tables
SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- verify that the tenant is not isolated
SELECT * FROM shard_sizes ORDER BY 1;
shardid | row_count
---------------------------------------------------------------------
300 | 49
301 | 51
(2 rows)
-- Verify that the tenant can be isolated after unsuccessful attempts
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
-- shard sizes after successful tenant isolation
CREATE TABLE old_shards AS SELECT shardid FROM pg_dist_shard;
WITH new_shard AS (
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical') AS shardid
)
SELECT row_count
FROM shard_sizes
JOIN new_shard ON shard_sizes.shardid = new_shard.shardid;
row_count
---------------------------------------------------------------------
1
(1 row)
SELECT row_count
FROM shard_sizes
WHERE shard_sizes.shardid NOT IN (SELECT * FROM old_shards)
ORDER BY 1;
row_count
---------------------------------------------------------------------
1
20
28
(3 rows)
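-- For reference, every failure case in this file follows the same injection
-- pattern; a minimal sketch, reusing the citus.mitmproxy helper and the
-- shard_sizes view defined earlier in this test (the injected query string
-- below is just one example):
-- 1) arrange the failure (use .cancel(:pid) for the cancellation variants)
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()');
-- 2) attempt the nonblocking isolation and expect an error
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- 3) stop injecting and verify the shard layout is unchanged
SELECT citus.mitmproxy('conn.allow()');
SELECT * FROM shard_sizes ORDER BY 1;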
\set VERBOSITY terse
DROP SCHEMA tenant_isolation CASCADE;
\set VERBOSITY default

@@ -30,7 +30,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -103,7 +103,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -173,7 +173,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -242,7 +242,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -311,7 +311,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -395,7 +395,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -465,7 +465,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -532,7 +532,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -598,7 +598,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -664,7 +664,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -740,7 +740,7 @@ step s1-begin:
     SET citus.select_opens_transaction_block TO false;
 step s1-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 2);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 2, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -748,7 +748,7 @@ isolate_tenant_to_new_shard
 (1 row)
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 ERROR: could not acquire the lock required to split public.isolation_table
 step s1-commit:
@@ -795,7 +795,7 @@ step s1-begin:
     SET citus.select_opens_transaction_block TO false;
 step s1-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 2);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 2, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -803,7 +803,7 @@ isolate_tenant_to_new_shard
 (1 row)
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 ERROR: could not acquire the lock required to split public.isolation_table
 step s1-commit:

@@ -0,0 +1,834 @@
Parsed test spec with 3 sessions
starting permutation: s1-load-cache s1-insert s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-update s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
TRUNCATE isolation_table;
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-update:
UPDATE isolation_table SET value = 5 WHERE id = 5;
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500076
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500075|t | 0
57637|1500076|t | 1
57637|1500077|t | 0
57638|1500074|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 5
(1 row)
starting permutation: s1-load-cache s1-insert s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-delete s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
TRUNCATE isolation_table;
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-delete:
DELETE FROM isolation_table WHERE id = 5;
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500082
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500081|t | 0
57637|1500082|t | 0
57637|1500083|t | 0
57638|1500080|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s1-load-cache s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-insert s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
TRUNCATE isolation_table;
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500088
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500087|t | 0
57637|1500088|t | 1
57637|1500089|t | 0
57638|1500086|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 10
(1 row)
starting permutation: s1-load-cache s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-copy s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
TRUNCATE isolation_table;
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-copy:
COPY isolation_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500094
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500093|t | 1
57637|1500094|t | 1
57637|1500095|t | 2
57638|1500092|t | 1
(4 rows)
id|value
---------------------------------------------------------------------
1| 1
2| 2
3| 3
4| 4
5| 5
(5 rows)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-update s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-update:
UPDATE isolation_table SET value = 5 WHERE id = 5;
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500100
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500099|t | 0
57637|1500100|t | 1
57637|1500101|t | 0
57638|1500098|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 5
(1 row)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-delete s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-delete:
DELETE FROM isolation_table WHERE id = 5;
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500106
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500105|t | 0
57637|1500106|t | 0
57637|1500107|t | 0
57638|1500104|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-insert s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500112
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500111|t | 0
57637|1500112|t | 1
57637|1500113|t | 0
57638|1500110|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 10
(1 row)
starting permutation: s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-copy s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-copy:
COPY isolation_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500118
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500117|t | 1
57637|1500118|t | 1
57637|1500119|t | 2
57638|1500116|t | 1
(4 rows)
id|value
---------------------------------------------------------------------
1| 1
2| 2
3| 3
4| 4
5| 5
(5 rows)
starting permutation: s1-load-cache s1-insert s3-acquire-advisory-lock s1-begin s1-isolate-tenant s2-isolate-tenant s3-release-advisory-lock s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
TRUNCATE isolation_table;
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 2, shard_transfer_mode => 'force_logical');
<waiting ...>
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
ERROR: could not acquire the lock required to split public.isolation_table
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s1-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500124
(1 row)
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500121|t | 1
57638|1500123|t | 0
57638|1500124|t | 0
57638|1500125|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 10
(1 row)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-isolate-tenant s2-isolate-tenant s3-release-advisory-lock s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 2, shard_transfer_mode => 'force_logical');
<waiting ...>
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
ERROR: could not acquire the lock required to split public.isolation_table
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s1-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500130
(1 row)
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500127|t | 1
57638|1500129|t | 0
57638|1500130|t | 0
57638|1500131|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 10
(1 row)

@@ -16,7 +16,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -73,7 +73,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -130,7 +130,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -187,7 +187,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
@@ -244,7 +244,7 @@ step s2-begin:
     BEGIN;
 step s2-isolate-tenant:
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------

@@ -400,7 +400,7 @@ SELECT create_distributed_table ('shard_split_table', 'a');
 (1 row)
-SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5);
+SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5, shard_transfer_mode => 'block_writes');
 ?column?
 ---------------------------------------------------------------------
        1

@@ -1081,6 +1081,7 @@ SELECT * FROM multi_extension.print_extension_changes();
 function citus_internal.downgrade_columnar_storage(regclass) void |
 function citus_internal.upgrade_columnar_storage(regclass) void |
 function columnar.columnar_handler(internal) table_am_handler |
+function isolate_tenant_to_new_shard(regclass,"any",text) bigint |
 function replicate_reference_tables() void |
 function worker_cleanup_job_schema_cache() void |
 function worker_create_schema(bigint,text) void |
@@ -1098,6 +1099,7 @@ SELECT * FROM multi_extension.print_extension_changes();
 table columnar.stripe |
 | function citus_locks() SETOF record
 | function citus_split_shard_by_split_points(bigint,text[],integer[],citus.shard_transfer_mode) void
+| function isolate_tenant_to_new_shard(regclass,"any",text,citus.shard_transfer_mode) bigint
 | function replicate_reference_tables(citus.shard_transfer_mode) void
 | function worker_copy_table_to_node(regclass,integer) void
 | function worker_split_copy(bigint,split_copy_info[]) void
@@ -1106,7 +1108,7 @@ SELECT * FROM multi_extension.print_extension_changes();
 | type split_copy_info
 | type split_shard_info
 | view citus_locks
-(32 rows)
+(34 rows)
 DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
 -- show running version

@@ -389,7 +389,7 @@ ORDER BY nodename, nodeport, shardid;
 (4 rows)
 -- verify isolate tenant carries grants
-SELECT isolate_tenant_to_new_shard('multiuser_schema.hash_table', 5);
+SELECT isolate_tenant_to_new_shard('multiuser_schema.hash_table', 5, shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                      109100

@@ -177,19 +177,19 @@ SELECT * FROM pg_dist_shard
 (4 rows)
 -- check without cascade option
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100);
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, shard_transfer_mode => 'block_writes');
 ERROR: cannot isolate tenant because "lineitem_streaming" has colocated tables
 HINT: Use CASCADE option to isolate tenants for the colocated tables too. Example usage: isolate_tenant_to_new_shard('lineitem_streaming', '100', 'CASCADE')
 -- check with an input not castable to bigint
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 'abc', 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 'abc', 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: invalid input syntax for type bigint: "abc"
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     1230005
 (1 row)
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 101, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 101, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     1230011
@@ -221,53 +221,53 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 SET search_path to "Tenant Isolation";
 -- test a failing transaction block
 BEGIN;
-SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     1230020
 (1 row)
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 102, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 102, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: table lineitem_streaming has already been isolated for the given value
 COMMIT;
 -- test a rollback transaction block
 BEGIN;
-SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     1230026
 (1 row)
-SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: cannot isolate tenant after other modifications in the same transaction.
 ROLLBACK;
 -- test a successful transaction block
 BEGIN;
-SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     1230032
 (1 row)
 COMMIT;
-SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     1230038
 (1 row)
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: table lineitem_streaming has already been isolated for the given value
-SELECT isolate_tenant_to_new_shard('orders_streaming', 101, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 101, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: table orders_streaming has already been isolated for the given value
 -- test corner cases: hash(-1995148554) = -2147483648 and hash(-1686493264) = 2147483647
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', -1995148554, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', -1995148554, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     1230040
 (1 row)
-SELECT isolate_tenant_to_new_shard('orders_streaming', -1686493264, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', -1686493264, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     1230047
@@ -622,11 +622,11 @@ SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08';
      1
 (1 row)
-SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-05-26');
+SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-05-26', shard_transfer_mode => 'block_writes');
 ERROR: cannot isolate tenants when using shard replication
-SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-07-30');
+SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-07-30', shard_transfer_mode => 'block_writes');
 ERROR: cannot isolate tenants when using shard replication
-SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-01-15');
+SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-01-15', shard_transfer_mode => 'block_writes');
 ERROR: cannot isolate tenants when using shard replication
 SELECT count(*) FROM lineitem_date;
 count
@@ -662,7 +662,7 @@ SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08';
 \c - postgres - :master_port
 SET search_path to "Tenant Isolation";
 UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_1_port;
-SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-08-08');
+SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-08-08', shard_transfer_mode => 'block_writes');
 ERROR: cannot split shard because relation "lineitem_date" has an inactive shard placement for the shard xxxxx
 HINT: Use master_copy_shard_placement UDF to repair the inactive shard placement.
 UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE nodeport = :worker_1_port;
@@ -679,7 +679,7 @@ SELECT create_distributed_table('test_append', 'tenant_id', 'append');
 (1 row)
-SELECT isolate_tenant_to_new_shard('test_append', 100);
+SELECT isolate_tenant_to_new_shard('test_append', 100, shard_transfer_mode => 'block_writes');
 ERROR: cannot isolate tenant because tenant isolation is only support for hash distributed tables
 -- check metadata for comparison
 SELECT * FROM pg_dist_shard
@@ -752,7 +752,7 @@ SET citus.override_table_visibility TO false;
 \c - mx_isolation_role_ent - :master_port
 SET search_path to "Tenant Isolation";
 \set VERBOSITY terse
-SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'block_writes');
 ERROR: command CREATE TABLE is disabled
 \set VERBOSITY default
 \c - postgres - :worker_1_port
@@ -795,7 +795,7 @@ CREATE EVENT TRIGGER abort_drop ON sql_drop
 \c - mx_isolation_role_ent - :master_port
 SET search_path to "Tenant Isolation";
 \set VERBOSITY terse
-SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'block_writes');
 WARNING: command DROP TABLE is disabled
 WARNING: command DROP TABLE is disabled
 WARNING: command DROP TABLE is disabled
@@ -1014,7 +1014,7 @@ SELECT create_distributed_table('composite_table', 'composite_key');
 INSERT INTO composite_table VALUES ('(1, 2)'::test_composite_type);
 INSERT INTO composite_table VALUES ('(1, 3)'::test_composite_type);
 INSERT INTO composite_table VALUES ('(1, 4)'::test_composite_type);
-SELECT isolate_tenant_to_new_shard('composite_table', '(1, 3)');
+SELECT isolate_tenant_to_new_shard('composite_table', '(1, 3)', shard_transfer_mode => 'block_writes');
 ERROR: cannot isolate tenants when using shard replication
 SELECT count(*) FROM composite_table WHERE composite_key = '(1, 2)'::test_composite_type;
 count
@@ -1072,7 +1072,7 @@ INSERT INTO test_reference_table_fkey SELECT i FROM generate_series (0, 100) i;
 INSERT INTO test_colocated_table_1 SELECT i, i FROM generate_series (0, 100) i;
 INSERT INTO test_colocated_table_2 SELECT i, i FROM generate_series (0, 100) i;
 INSERT INTO test_colocated_table_3 SELECT i, i FROM generate_series (0, 100) i;
-SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                     1230095
@@ -1193,7 +1193,7 @@ SELECT count(*) FROM partitioning_test;
 (1 row)
 -- isolate a value into its own shard
-SELECT 1 FROM isolate_tenant_to_new_shard('partitioning_test', 2, 'CASCADE');
+SELECT 1 FROM isolate_tenant_to_new_shard('partitioning_test', 2, 'CASCADE', shard_transfer_mode => 'block_writes');
 ?column?
 ---------------------------------------------------------------------
        1
@@ -1227,7 +1227,7 @@ SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE lo
 \c - mx_isolation_role_ent - :master_port
 SET search_path to "Tenant Isolation";
-SELECT 1 FROM isolate_tenant_to_new_shard('test_colocated_table_2', 2, 'CASCADE');
+SELECT 1 FROM isolate_tenant_to_new_shard('test_colocated_table_2', 2, 'CASCADE', shard_transfer_mode => 'block_writes');
 ?column?
 ---------------------------------------------------------------------
        1
@@ -1248,6 +1248,8 @@ SELECT 1 FROM master_remove_node('localhost', :master_port);
 SET client_min_messages TO WARNING;
 DROP SCHEMA "Tenant Isolation" CASCADE;
+REVOKE ALL ON SCHEMA public FROM mx_isolation_role_ent;
+DROP ROLE mx_isolation_role_ent;
 -- stop & resync and stop syncing metadata
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 stop_metadata_sync_to_node

File diff suppressed because it is too large.

@@ -207,7 +207,7 @@ SELECT * FROM view_created_before_shard_moves;
 (1 row)
 -- show that tenant isolation works fine
-SELECT isolate_tenant_to_new_shard('test', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('test', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
                    90731505

@@ -144,7 +144,7 @@ ORDER BY 1;
 function get_rebalance_progress()
 function get_rebalance_table_shards_plan(regclass,real,integer,bigint[],boolean,name,real)
 function get_shard_id_for_distribution_column(regclass,"any")
-function isolate_tenant_to_new_shard(regclass,"any",text)
+function isolate_tenant_to_new_shard(regclass,"any",text,citus.shard_transfer_mode)
 function json_cat_agg(json)
 function jsonb_cat_agg(jsonb)
 function lock_relation_if_exists(text,text)

@@ -66,7 +66,7 @@ step "s1-copy"
 step "s1-isolate-tenant"
 {
-    SELECT isolate_tenant_to_new_shard('isolation_table', 2);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 2, shard_transfer_mode => 'block_writes');
 }
 step "s1-commit"
@@ -83,7 +83,7 @@ step "s2-begin"
 step "s2-isolate-tenant"
 {
-    SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+    SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 }
 step "s2-commit"

@@ -0,0 +1,142 @@
setup
{
SET citus.shard_count to 2;
SET citus.shard_replication_factor to 1;
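    -- keep generated shard ids inside the 1500000-1599999 range that this
    -- spec's expected output assumes; the two nextval() probes in the CASE
    -- advance the sequence, so the ELSE branch sets it back by 2 to compensate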
SELECT setval('pg_dist_shardid_seq',
CASE WHEN nextval('pg_dist_shardid_seq') > 1599999 OR nextval('pg_dist_shardid_seq') < 1500000
THEN 1500000
ELSE nextval('pg_dist_shardid_seq')-2
END);
CREATE TABLE isolation_table (id int PRIMARY KEY, value int);
SELECT create_distributed_table('isolation_table', 'id');
}
teardown
{
DROP TABLE isolation_table;
}
session "s1"
step "s1-begin"
{
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
}
// cache all placements
step "s1-load-cache"
{
TRUNCATE isolation_table;
}
step "s1-insert"
{
INSERT INTO isolation_table VALUES (5, 10);
}
step "s1-update"
{
UPDATE isolation_table SET value = 5 WHERE id = 5;
}
step "s1-delete"
{
DELETE FROM isolation_table WHERE id = 5;
}
step "s1-select"
{
SELECT count(*) FROM isolation_table WHERE id = 5;
}
step "s1-copy"
{
COPY isolation_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
}
step "s1-isolate-tenant"
{
SELECT isolate_tenant_to_new_shard('isolation_table', 2, shard_transfer_mode => 'force_logical');
}
step "s1-commit"
{
COMMIT;
}
session "s2"
step "s2-begin"
{
BEGIN;
}
step "s2-isolate-tenant"
{
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
}
step "s2-commit"
{
COMMIT;
}
step "s2-print-cluster"
{
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
}
session "s3"
// this advisory lock with (almost) random values is used only
// for testing purposes. For details, check Citus' logical replication
// source code
step "s3-acquire-advisory-lock"
{
SELECT pg_advisory_lock(44000, 55152);
}
step "s3-release-advisory-lock"
{
SELECT pg_advisory_unlock(44000, 55152);
}
// Even if we do not block shard split copy operations, we eventually have to block concurrent sessions during metadata operations (for a tiny amount of time).
// To prevent concurrent DML from being blocked by those metadata operations, we take the advisory lock from another dummy session and let the concurrent DML
// execute concurrently with the shard split copy. Just before committing, we release the advisory lock from the dummy session to allow all the sessions to finish.
// s3 takes an advisory lock before s2 takes it (we take the same lock for shard movement only during isolation tests) =>
// s1 can execute its DML command concurrently with s2's shard isolation =>
// s3 releases the advisory lock so that s2 can finish the transaction
// run tenant isolation while concurrently performing DML
// we expect the DML queries of s1 to succeed without being blocked.
permutation "s1-load-cache" "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-delete" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-load-cache" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-insert" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-load-cache" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-copy" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
// the same tests without loading the cache at first
permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-delete" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-insert" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-copy" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
// concurrent tenant isolation blocks on different shards of the same table (or any colocated table)
permutation "s1-load-cache" "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-isolate-tenant" "s2-isolate-tenant" "s3-release-advisory-lock" "s1-commit" "s2-print-cluster"
// the same test above without loading the cache at first
permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-isolate-tenant" "s2-isolate-tenant" "s3-release-advisory-lock" "s1-commit" "s2-print-cluster"

View File

@@ -69,7 +69,7 @@ step "s2-begin"
 step "s2-isolate-tenant"
 {
-SELECT isolate_tenant_to_new_shard('isolation_table', 5);
+SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
 }
 step "s2-add-fkey"

View File

@@ -201,7 +201,7 @@ SET citus.shard_count TO 23;
 CREATE TABLE shard_split_table (a int, b int);
 SELECT create_distributed_table ('shard_split_table', 'a');
-SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5);
+SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5, shard_transfer_mode => 'block_writes');
 -- show the difference in pg_dist_colocation and citus_tables shard counts
 SELECT

View File

@@ -17,7 +17,7 @@ CREATE TABLE citus_local_table (a int, b int);
 SELECT citus_add_local_table_to_metadata('citus_local_table');
 -- isolate_tenant_to_new_shard is not supported
-SELECT isolate_tenant_to_new_shard('citus_local_table', 100);
+SELECT isolate_tenant_to_new_shard('citus_local_table', 100, shard_transfer_mode => 'block_writes');
 -- master_copy_shard_placement is not supported
 SELECT master_copy_shard_placement(shardid, 'localhost', :master_port, 'localhost', :worker_1_port, false)

View File

@@ -232,7 +232,7 @@ SET citus.shard_replication_factor TO 1;
 8999002, -- sensors_old
 ARRAY['-2127770000'],
 ARRAY[:worker_1_node, :worker_2_node],
-'auto');
+'force_logical');
 -- END: Split a partition table directly
 -- BEGIN: Validate Shard Info and Data

View File

@@ -9,6 +9,10 @@ Here is a high level overview of test plan:
 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
 7. Move one of the split shards to test Split -> ShardMove.
 8. Split an already split shard a second time on a different schema.
+9. Create a colocated table with no replica identity.
+10. Show that we do not allow Split with the shard transfer mode 'auto' if any colocated table has no replica identity.
+11. Drop the colocated table with no replica identity.
+12. Show that we allow Split with the shard transfer mode 'auto' if all colocated tables have replica identity.
 */
 CREATE SCHEMA "citus_split_test_schema";
@@ -228,6 +232,49 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node
 ORDER BY logicalrelid, shardminvalue::BIGINT;
 -- END: Split second time on another schema
+-- BEGIN: Create a co-located table with no replica identity.
+CREATE TABLE table_no_rep_id (measureid integer);
+SELECT create_distributed_table('table_no_rep_id', 'measureid', colocate_with:='sensors');
+-- END: Create a co-located table with no replica identity.
+-- BEGIN: Split a shard with shard_transfer_mode='auto' and a colocated table with no replica identity
+SET citus.next_shard_id TO 8981041;
+SELECT pg_catalog.citus_split_shard_by_split_points(
+8981031,
+ARRAY['-2120000000'],
+ARRAY[:worker_1_node, :worker_2_node],
+'auto');
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
+FROM pg_dist_shard AS shard
+INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
+INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
+INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
+WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
+ORDER BY logicalrelid, shardminvalue::BIGINT;
+-- END: Split a shard with shard_transfer_mode='auto' and a colocated table with no replica identity
+-- BEGIN: Drop the co-located table with no replica identity.
+DROP TABLE table_no_rep_id;
+-- END: Drop the co-located table with no replica identity.
+-- BEGIN: Split a shard with shard_transfer_mode='auto' when all colocated tables have replica identity
+SET citus.next_shard_id TO 8981041;
+SELECT pg_catalog.citus_split_shard_by_split_points(
+8981031,
+ARRAY['-2120000000'],
+ARRAY[:worker_1_node, :worker_2_node],
+'auto');
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
+FROM pg_dist_shard AS shard
+INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
+INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
+INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
+WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
+ORDER BY logicalrelid, shardminvalue::BIGINT;
+-- END: Split a shard with shard_transfer_mode='auto' when all colocated tables have replica identity
 -- BEGIN: Validate Data Count
 SELECT COUNT(*) FROM sensors;
 SELECT COUNT(*) FROM reference_table;
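Steps 10-12 hinge on replica identities: 'auto' is rejected when a colocated table cannot publish updates and deletes over logical replication. As a sketch outside the test, the table above could instead have been made split-friendly by giving it a replica identity:

-- a primary key doubles as the default replica identity
ALTER TABLE table_no_rep_id ADD CONSTRAINT table_no_rep_id_pkey PRIMARY KEY (measureid);
-- or publish whole rows for tables without a usable key
ALTER TABLE table_no_rep_id REPLICA IDENTITY FULL;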

View File

@@ -92,7 +92,7 @@ FROM shard_ranges;
 -- UDF fails where source shard cannot be split further, i.e. min and max range is equal.
 -- Create a Shard where range cannot be split further
-SELECT isolate_tenant_to_new_shard('table_to_split', 1);
+SELECT isolate_tenant_to_new_shard('table_to_split', 1, shard_transfer_mode => 'block_writes');
 SELECT citus_split_shard_by_split_points(
 49761305,
 ARRAY['-1073741826'],

View File

@@ -39,119 +39,119 @@ SELECT * FROM shard_sizes ORDER BY 1;
 -- failure on colocated table creation
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- cancellation on colocated table creation
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on colocated table population
 SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- cancellation on colocated table population
 SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on colocated table constraints
 SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(1).kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- cancellation on colocated table constraints
 SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(2).cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on table creation
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- cancellation on table creation
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on table population
 SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- cancellation on table population
 SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on table constraints
 SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(1).kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- cancellation on table constraints
 SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on dropping old colocated shard
 SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- cancellation on dropping old colocated shard
 SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on dropping old shard
 SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- cancellation on dropping old shard
 SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on foreign key creation
 SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on foreign key creation
 SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").after(2).cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on shard split transaction
 SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on shard split transaction
 SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on shard split transaction commit
 SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on shard split transaction commit
 SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on transaction for dropping old tables
 SELECT citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on transaction for dropping old tables
 SELECT citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on transaction for foreign key creation
 SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on transaction for foreign key creation
 SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on transaction commit for foreign key creation
 SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on transaction commit for foreign key creation
 SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on transaction prepare for dropping old tables
 SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
@@ -162,7 +162,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
 DO LANGUAGE plpgsql
 $$
 BEGIN
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 EXCEPTION WHEN OTHERS THEN
 RAISE 'Command failed to execute';
 END;
@@ -171,15 +171,15 @@ $$;
 -- failure on transaction prepare for dropping old tables
 SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on transaction commit for dropping old tables
 SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").kill()');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- failure on transaction commit for dropping old tables
 SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").cancel(' || :pid || ')');
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- verify that the tenant is not isolated
@@ -191,7 +191,7 @@ SELECT citus.mitmproxy('conn.allow()');
 -- shard sizes after successful tenant isolation
 CREATE TABLE old_shards AS SELECT shardid FROM pg_dist_shard;
 WITH new_shard AS (
-SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE') AS shardid
+SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes') AS shardid
 )
 SELECT row_count
 FROM shard_sizes

View File

@@ -0,0 +1,243 @@
--
-- failure_tenant_isolation
--
-- due to different libpq versions
-- some warning messages differ
-- between local and CI
SET client_min_messages TO ERROR;
CREATE SCHEMA IF NOT EXISTS tenant_isolation;
SET SEARCH_PATH = tenant_isolation;
SET citus.shard_count TO 2;
SET citus.next_shard_id TO 300;
SET citus.shard_replication_factor TO 1;
SET citus.max_adaptive_executor_pool_size TO 1;
SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');
CREATE TABLE table_1 (id int PRIMARY KEY);
CREATE TABLE table_2 (ref_id int REFERENCES table_1(id) UNIQUE, data int);
SELECT create_distributed_table('table_1', 'id');
SELECT create_distributed_table('table_2', 'ref_id');
CREATE VIEW shard_sizes AS
SELECT shardid, result AS row_count
FROM run_command_on_placements('table_1', 'SELECT count(*) FROM %s');
INSERT INTO table_1
SELECT x
FROM generate_series(1, 100) AS f (x);
INSERT INTO table_2
SELECT x, x
FROM generate_series(1, 100) AS f (x);
-- initial shard sizes
SELECT * FROM shard_sizes ORDER BY 1;
-- failure on table creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on table creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on colocated table creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on colocated table creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on table constraints on replica identity creation
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on table constraints on replica identity creation
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on publication creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on publication creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on replication slot creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE_REPLICATION_SLOT").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on replication slot creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE_REPLICATION_SLOT").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on setting snapshot
SELECT citus.mitmproxy('conn.onQuery(query="SET TRANSACTION SNAPSHOT").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on setting snapshot
SELECT citus.mitmproxy('conn.onQuery(query="SET TRANSACTION SNAPSHOT").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on table population
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on table population
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on colocated table population
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on colocated table population
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on replication setup udf
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_shard_replication_setup").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on replication setup udf
SELECT citus.mitmproxy('conn.onQuery(query="worker_split_shard_replication_setup").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on subscription creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on subscription creation
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on colocated table constraints
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(1).kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on colocated table constraints
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(2).cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on catching up LSN
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_current_wal_lsn").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on catching up LSN
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_current_wal_lsn").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on dropping subscription
SELECT citus.mitmproxy('conn.onQuery(query="DROP SUBSCRIPTION").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on dropping subscription
SELECT citus.mitmproxy('conn.onQuery(query="DROP SUBSCRIPTION").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on dropping publication
SELECT citus.mitmproxy('conn.onQuery(query="DROP PUBLICATION").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on dropping publication
SELECT citus.mitmproxy('conn.onQuery(query="DROP PUBLICATION").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on dropping replication slot
SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on dropping replication slot
SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on foreign key creation
SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on foreign key creation
SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").after(2).cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on shard split transaction
SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on shard split transaction
SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on shard split transaction commit
SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on shard split transaction commit
SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on transaction prepare for dropping old tables
SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
-- due to libpq version differences, the output might change
-- hence use code block to catch the error
\set VERBOSITY terse
DO LANGUAGE plpgsql
$$
BEGIN
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
EXCEPTION WHEN OTHERS THEN
RAISE 'Command failed to execute';
END;
$$;
\set VERBOSITY default
-- failure on transaction prepare for dropping old tables
SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- verify that the tenant is not isolated
SELECT * FROM shard_sizes ORDER BY 1;
-- Verify that tenant can be isolated after unsuccessful attempts
SELECT citus.mitmproxy('conn.allow()');
-- shard sizes after successful tenant isolation
CREATE TABLE old_shards AS SELECT shardid FROM pg_dist_shard;
WITH new_shard AS (
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical') AS shardid
)
SELECT row_count
FROM shard_sizes
JOIN new_shard ON shard_sizes.shardid = new_shard.shardid;
SELECT row_count
FROM shard_sizes
WHERE shard_sizes.shardid NOT IN (SELECT * FROM old_shards)
ORDER BY 1;
\set VERBOSITY terse
DROP SCHEMA tenant_isolation CASCADE;
\set VERBOSITY default
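Every failure case above follows the same three-step mitmproxy pattern; stripped to its skeleton (the intercepted query fragment is just an example):

-- 1. arm the proxy: kill (or cancel) the connection once the target query is seen
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()');
-- 2. run the operation that is expected to fail cleanly and leave no orphaned state
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- 3. disarm the proxy before the next case
SELECT citus.mitmproxy('conn.allow()');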

View File

@@ -264,7 +264,7 @@ SET citus.shard_replication_factor TO 1;
 CREATE TABLE shard_split_table (a int, b int);
 SELECT create_distributed_table ('shard_split_table', 'a');
-SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5);
+SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5, shard_transfer_mode => 'block_writes');
 -- show the difference in pg_dist_colocation and citus_tables shard counts
 SELECT

View File

@@ -164,7 +164,7 @@ SELECT * FROM run_command_on_placements('multiuser_schema.another_table', $$ sel
 ORDER BY nodename, nodeport, shardid;
 -- verify isolate tenant carries grants
-SELECT isolate_tenant_to_new_shard('multiuser_schema.hash_table', 5);
+SELECT isolate_tenant_to_new_shard('multiuser_schema.hash_table', 5, shard_transfer_mode => 'block_writes');
 SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$)
 ORDER BY nodename, nodeport, shardid;

View File

@@ -136,13 +136,13 @@ SELECT * FROM pg_dist_shard
 ORDER BY shardminvalue::BIGINT, logicalrelid;
 -- check without cascade option
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100);
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, shard_transfer_mode => 'block_writes');
 -- check with an input not castable to bigint
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 'abc', 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 'abc', 'CASCADE', shard_transfer_mode => 'block_writes');
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE', shard_transfer_mode => 'block_writes');
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 101, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 101, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- add an explain check to see if we hit the new isolated shard
 EXPLAIN (COSTS false) SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101;
@@ -156,28 +156,28 @@ SET search_path to "Tenant Isolation";
 -- test a failing transaction block
 BEGIN;
-SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE', shard_transfer_mode => 'block_writes');
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 102, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 102, 'CASCADE', shard_transfer_mode => 'block_writes');
 COMMIT;
 -- test a rollback transaction block
 BEGIN;
-SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE', shard_transfer_mode => 'block_writes');
-SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE', shard_transfer_mode => 'block_writes');
 ROLLBACK;
 -- test a successful transaction block
 BEGIN;
-SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE', shard_transfer_mode => 'block_writes');
 COMMIT;
-SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE', shard_transfer_mode => 'block_writes');
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE', shard_transfer_mode => 'block_writes');
-SELECT isolate_tenant_to_new_shard('orders_streaming', 101, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 101, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- test corner cases: hash(-1995148554) = -2147483648 and hash(-1686493264) = 2147483647
-SELECT isolate_tenant_to_new_shard('lineitem_streaming', -1995148554, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', -1995148554, 'CASCADE', shard_transfer_mode => 'block_writes');
-SELECT isolate_tenant_to_new_shard('orders_streaming', -1686493264, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', -1686493264, 'CASCADE', shard_transfer_mode => 'block_writes');
 SELECT count(*) FROM orders_streaming WHERE o_orderkey = -1995148554;
 SELECT count(*) FROM orders_streaming WHERE o_orderkey = -1686493264;
@@ -305,9 +305,9 @@ SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-07-30';
 SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-01-15';
 SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08';
-SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-05-26');
+SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-05-26', shard_transfer_mode => 'block_writes');
-SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-07-30');
+SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-07-30', shard_transfer_mode => 'block_writes');
-SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-01-15');
+SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-01-15', shard_transfer_mode => 'block_writes');
 SELECT count(*) FROM lineitem_date;
 SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-05-26';
@@ -320,7 +320,7 @@ SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08';
 SET search_path to "Tenant Isolation";
 UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_1_port;
-SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-08-08');
+SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-08-08', shard_transfer_mode => 'block_writes');
 UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE nodeport = :worker_1_port;
@@ -334,7 +334,7 @@ CREATE TABLE test_append (
 );
 SELECT create_distributed_table('test_append', 'tenant_id', 'append');
-SELECT isolate_tenant_to_new_shard('test_append', 100);
+SELECT isolate_tenant_to_new_shard('test_append', 100, shard_transfer_mode => 'block_writes');
 -- check metadata for comparison
 SELECT * FROM pg_dist_shard
@@ -366,7 +366,7 @@ SET citus.override_table_visibility TO false;
 SET search_path to "Tenant Isolation";
 \set VERBOSITY terse
-SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'block_writes');
 \set VERBOSITY default
@@ -397,7 +397,7 @@ CREATE EVENT TRIGGER abort_drop ON sql_drop
 SET search_path to "Tenant Isolation";
 \set VERBOSITY terse
-SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'block_writes');
 \set VERBOSITY default
@@ -502,7 +502,7 @@ INSERT INTO composite_table VALUES ('(1, 2)'::test_composite_type);
 INSERT INTO composite_table VALUES ('(1, 3)'::test_composite_type);
 INSERT INTO composite_table VALUES ('(1, 4)'::test_composite_type);
-SELECT isolate_tenant_to_new_shard('composite_table', '(1, 3)');
+SELECT isolate_tenant_to_new_shard('composite_table', '(1, 3)', shard_transfer_mode => 'block_writes');
 SELECT count(*) FROM composite_table WHERE composite_key = '(1, 2)'::test_composite_type;
 SELECT count(*) FROM composite_table WHERE composite_key = '(1, 3)'::test_composite_type;
@@ -532,7 +532,7 @@ INSERT INTO test_colocated_table_1 SELECT i, i FROM generate_series (0, 100) i;
 INSERT INTO test_colocated_table_2 SELECT i, i FROM generate_series (0, 100) i;
 INSERT INTO test_colocated_table_3 SELECT i, i FROM generate_series (0, 100) i;
-SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE', shard_transfer_mode => 'block_writes');
 SELECT count(*) FROM test_colocated_table_2;
@@ -585,7 +585,7 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'partitioning_test'::reg
 SELECT count(*) FROM partitioning_test;
 -- isolate a value into its own shard
-SELECT 1 FROM isolate_tenant_to_new_shard('partitioning_test', 2, 'CASCADE');
+SELECT 1 FROM isolate_tenant_to_new_shard('partitioning_test', 2, 'CASCADE', shard_transfer_mode => 'block_writes');
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'partitioning_test'::regclass;
 SELECT count(*) FROM partitioning_test;
@@ -601,7 +601,7 @@ SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE lo
 \c - mx_isolation_role_ent - :master_port
 SET search_path to "Tenant Isolation";
-SELECT 1 FROM isolate_tenant_to_new_shard('test_colocated_table_2', 2, 'CASCADE');
+SELECT 1 FROM isolate_tenant_to_new_shard('test_colocated_table_2', 2, 'CASCADE', shard_transfer_mode => 'block_writes');
 SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
@@ -609,6 +609,8 @@ SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE lo
 SELECT 1 FROM master_remove_node('localhost', :master_port);
 SET client_min_messages TO WARNING;
 DROP SCHEMA "Tenant Isolation" CASCADE;
+REVOKE ALL ON SCHEMA public FROM mx_isolation_role_ent;
+DROP ROLE mx_isolation_role_ent;
 -- stop & resync and stop syncing metadata
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);

View File

@@ -0,0 +1,637 @@
--
-- MULTI_TENANT_ISOLATION
--
-- Tests tenant isolation feature
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
\gset
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000;
CREATE SCHEMA "Tenant Isolation";
SET search_path to "Tenant Isolation";
CREATE ROLE mx_isolation_role_ent WITH LOGIN;
GRANT ALL ON SCHEMA "Tenant Isolation", public TO mx_isolation_role_ent;
-- connect with this new role
\c - mx_isolation_role_ent - :master_port
SET search_path to "Tenant Isolation";
SET citus.shard_replication_factor TO 1;
SET citus.shard_count to 2;
CREATE TABLE lineitem_streaming (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
SELECT create_distributed_table('lineitem_streaming', 'l_orderkey');
CREATE TABLE orders_streaming (
o_orderkey bigint not null primary key,
o_custkey integer not null,
o_orderstatus char(1) not null,
o_totalprice decimal(15,2) not null,
o_orderdate date not null,
o_orderpriority char(15) not null,
o_clerk char(15) not null,
o_shippriority integer not null,
o_comment varchar(79) not null);
SELECT create_distributed_table('orders_streaming', 'o_orderkey');
\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|'
99|87114|4639|1|10|11011.10|0.02|0.01|A|F|1994-05-18|1994-06-03|1994-05-23|COLLECT COD|RAIL|kages. requ
99|123766|3767|2|5|8948.80|0.02|0.07|R|F|1994-05-06|1994-05-28|1994-05-20|TAKE BACK RETURN|RAIL|ests cajole fluffily waters. blithe
99|134082|1622|3|42|46875.36|0.02|0.02|A|F|1994-04-19|1994-05-18|1994-04-20|NONE|RAIL|kages are fluffily furiously ir
99|108338|849|4|36|48467.88|0.09|0.02|A|F|1994-07-04|1994-04-17|1994-07-30|DELIVER IN PERSON|AIR|slyly. slyly e
100|62029|2030|1|28|27748.56|0.04|0.05|N|O|1998-05-08|1998-05-13|1998-06-07|COLLECT COD|TRUCK|sts haggle. slowl
100|115979|8491|2|22|43889.34|0.00|0.07|N|O|1998-06-24|1998-04-12|1998-06-29|DELIVER IN PERSON|SHIP|nto beans alongside of the fi
100|46150|8655|3|46|50422.90|0.03|0.04|N|O|1998-05-02|1998-04-10|1998-05-22|TAKE BACK RETURN|SHIP|ular accounts. even
100|38024|3031|4|14|13468.28|0.06|0.03|N|O|1998-05-22|1998-05-01|1998-06-03|COLLECT COD|MAIL|y. furiously ironic ideas gr
100|53439|955|5|37|51519.91|0.05|0.00|N|O|1998-03-06|1998-04-16|1998-03-31|TAKE BACK RETURN|TRUCK|nd the quickly s
101|118282|5816|1|49|63713.72|0.10|0.00|N|O|1996-06-21|1996-05-27|1996-06-29|DELIVER IN PERSON|REG AIR|ts
101|163334|883|2|36|50303.88|0.00|0.01|N|O|1996-05-19|1996-05-01|1996-06-04|DELIVER IN PERSON|AIR|tes. blithely pending dolphins x-ray f
101|138418|5958|3|12|17476.92|0.06|0.02|N|O|1996-03-29|1996-04-20|1996-04-12|COLLECT COD|MAIL|. quickly regular
102|88914|3931|1|37|70407.67|0.06|0.00|N|O|1997-07-24|1997-08-02|1997-08-07|TAKE BACK RETURN|SHIP|ully across the ideas. final deposit
102|169238|6787|2|34|44445.82|0.03|0.08|N|O|1997-08-09|1997-07-28|1997-08-26|TAKE BACK RETURN|SHIP|eposits cajole across
102|182321|4840|3|25|35083.00|0.01|0.01|N|O|1997-07-31|1997-07-24|1997-08-17|NONE|RAIL|bits. ironic accoun
102|61158|8677|4|15|16787.25|0.07|0.07|N|O|1997-06-02|1997-07-13|1997-06-04|DELIVER IN PERSON|SHIP|final packages. carefully even excu
103|194658|2216|1|6|10515.90|0.03|0.05|N|O|1996-10-11|1996-07-25|1996-10-28|NONE|FOB|cajole. carefully ex
103|10426|2928|2|37|49447.54|0.02|0.07|N|O|1996-09-17|1996-07-27|1996-09-20|TAKE BACK RETURN|MAIL|ies. quickly ironic requests use blithely
103|28431|8432|3|23|31266.89|0.01|0.04|N|O|1996-09-11|1996-09-18|1996-09-26|NONE|FOB|ironic accou
103|29022|4027|4|32|30432.64|0.01|0.07|N|O|1996-07-30|1996-08-06|1996-08-04|NONE|RAIL|kages doze. special, regular deposit
-1995148554|112942|2943|1|9|17594.46|0.04|0.04|N|O|1996-08-03|1996-05-31|1996-08-04|DELIVER IN PERSON|TRUCK|c realms print carefully car
-1686493264|15110|113|5|2|2050.22|0.03|0.08|R|F|1994-04-26|1994-03-15|1994-05-15|TAKE BACK RETURN|MAIL|e final, regular requests. carefully
\.
\COPY orders_streaming FROM STDIN WITH DELIMITER '|'
99|890|F|108594.87|1994-03-13|4-NOT SPECIFIED|Clerk#000000973|0|e carefully ironic packages. pending
100|1471|O|198978.27|1998-02-28|4-NOT SPECIFIED|Clerk#000000577|0|heodolites detect slyly alongside of the ent
101|280|O|118448.39|1996-03-17|3-MEDIUM|Clerk#000000419|0|ding accounts above the slyly final asymptote
102|8|O|184806.58|1997-05-09|2-HIGH|Clerk#000000596|0| slyly according to the asymptotes. carefully final packages integrate furious
103|292|O|118745.16|1996-06-20|4-NOT SPECIFIED|Clerk#000000090|0|ges. carefully unusual instructions haggle quickly regular f
-1995148554|142|O|3553.15|1995-05-08|3-MEDIUM|Clerk#000000378|0|nts hinder fluffily ironic instructions. express, express excuses
-1686493264|878|O|177809.13|1997-09-05|3-MEDIUM|Clerk#000000379|0|y final packages. final foxes since the quickly even
\.
ALTER TABLE lineitem_streaming ADD CONSTRAINT test_constraint
FOREIGN KEY(l_orderkey) REFERENCES orders_streaming(o_orderkey);
-- test failing foreign constraints
\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|'
128|106828|9339|1|38|69723.16|0.06|0.01|A|F|1992-09-01|1992-08-27|1992-10-01|TAKE BACK RETURN|FOB| cajole careful
\.
-- tests for cluster health
SELECT count(*) FROM lineitem_streaming;
SELECT count(*) FROM orders_streaming;
SELECT
l_orderkey,
sum(l_extendedprice * (1 - l_discount)) as revenue,
o_orderdate
FROM
orders_streaming,
lineitem_streaming
WHERE
l_orderkey = o_orderkey
GROUP BY
l_orderkey,
o_orderdate
ORDER BY
revenue DESC,
o_orderdate;
-- Checks to see if metadata and data are isolated properly. If there are problems in
-- metadata and/or data on workers, these queries should return different results below
-- after tenant isolation operations are applied.
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 99;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 100;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 102;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 103;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 99;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 100;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 101;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 102;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103;
SELECT * FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass
ORDER BY shardminvalue::BIGINT, logicalrelid;
-- check without cascade option
SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, shard_transfer_mode => 'force_logical');
-- check with an input not castable to bigint
SELECT isolate_tenant_to_new_shard('lineitem_streaming', 'abc', 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT isolate_tenant_to_new_shard('lineitem_streaming', 101, 'CASCADE', shard_transfer_mode => 'force_logical');
-- add an explain check to see if we hit the new isolated shard
EXPLAIN (COSTS false) SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101;
-- create an MX node
\c - postgres - :master_port
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
\c - mx_isolation_role_ent - :master_port
SET search_path to "Tenant Isolation";
-- test a failing transaction block
BEGIN;
SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT isolate_tenant_to_new_shard('lineitem_streaming', 102, 'CASCADE', shard_transfer_mode => 'force_logical');
COMMIT;
-- test a rollback transaction block
BEGIN;
SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE', shard_transfer_mode => 'force_logical');
ROLLBACK;
-- test a successful transaction block
BEGIN;
SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE', shard_transfer_mode => 'force_logical');
COMMIT;
SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT isolate_tenant_to_new_shard('orders_streaming', 101, 'CASCADE', shard_transfer_mode => 'force_logical');
-- test corner cases: hash(-1995148554) = -2147483648 and hash(-1686493264) = 2147483647
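-- sanity check (assumes the worker_hash() UDF is accessible to this role):
-- the two tenant values above should hash to the extremes of the hash range
SELECT worker_hash((-1995148554)::bigint), worker_hash((-1686493264)::bigint);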
SELECT isolate_tenant_to_new_shard('lineitem_streaming', -1995148554, 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT isolate_tenant_to_new_shard('orders_streaming', -1686493264, 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT count(*) FROM orders_streaming WHERE o_orderkey = -1995148554;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = -1686493264;
-- tests for cluster health
SELECT count(*) FROM lineitem_streaming;
SELECT count(*) FROM orders_streaming;
SELECT
l_orderkey,
sum(l_extendedprice * (1 - l_discount)) as revenue,
o_orderdate
FROM
orders_streaming,
lineitem_streaming
WHERE
l_orderkey = o_orderkey
GROUP BY
l_orderkey,
o_orderdate
ORDER BY
revenue DESC,
o_orderdate;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 99;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 100;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 102;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 103;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 99;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 100;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 101;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 102;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103;
SELECT * FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass
ORDER BY shardminvalue::BIGINT, logicalrelid;
SELECT * FROM pg_dist_shard_placement WHERE shardid >= 1230000 ORDER BY nodeport, shardid;
-- test failing foreign key constraints after multiple tenant isolation operations
\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|'
128|106828|9339|1|38|69723.16|0.06|0.01|A|F|1992-09-01|1992-08-27|1992-10-01|TAKE BACK RETURN|FOB| cajole careful
\.
-- connect to the worker node with metadata
\c - mx_isolation_role_ent - :worker_1_port
SET search_path to "Tenant Isolation";
-- check MX tables
SELECT count(*) FROM lineitem_streaming;
SELECT count(*) FROM orders_streaming;
SELECT
l_orderkey,
sum(l_extendedprice * (1 - l_discount)) as revenue,
o_orderdate
FROM
orders_streaming,
lineitem_streaming
WHERE
l_orderkey = o_orderkey
GROUP BY
l_orderkey,
o_orderdate
ORDER BY
revenue DESC,
o_orderdate;
-- check shards
SET citus.override_table_visibility TO false;
\d
\c - postgres - :worker_1_port
SET search_path to "Tenant Isolation";
SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='orders_streaming_1230039'::regclass;
\c - mx_isolation_role_ent - :worker_1_port
SET search_path to "Tenant Isolation";
-- check MX metadata
SELECT * FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass
ORDER BY shardminvalue::BIGINT, logicalrelid;
-- return to master node
\c - mx_isolation_role_ent - :master_port
-- test a distribution type which does not have a SQL hash function
SET search_path to "Tenant Isolation";
SET citus.shard_replication_factor TO 2;
SET citus.shard_count to 2;
CREATE TABLE lineitem_date (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
SELECT create_distributed_table('lineitem_date', 'l_shipdate');
\COPY lineitem_date FROM STDIN WITH DELIMITER '|'
390|106523|9034|1|10|15295.20|0.02|0.05|N|O|1998-05-26|1998-07-06|1998-06-23|TAKE BACK RETURN|SHIP| requests. final accounts x-ray beside the
1347|112077|4589|4|28|30493.96|0.01|0.00|N|O|1997-07-30|1997-07-22|1997-08-18|TAKE BACK RETURN|FOB|foxes after the blithely special i
1794|116434|1457|5|47|68170.21|0.10|0.06|N|O|1998-01-15|1997-11-30|1998-02-14|DELIVER IN PERSON|TRUCK| haggle slyly. furiously express orbit
1859|74969|4970|1|18|34991.28|0.10|0.00|N|O|1997-08-08|1997-06-30|1997-08-26|TAKE BACK RETURN|SHIP|e carefully a
\.
SELECT count(*) FROM lineitem_date;
SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-05-26';
SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-07-30';
SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-01-15';
SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08';
SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-05-26', shard_transfer_mode => 'force_logical');
SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-07-30', shard_transfer_mode => 'force_logical');
SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-01-15', shard_transfer_mode => 'force_logical');
SELECT count(*) FROM lineitem_date;
SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-05-26';
SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-07-30';
SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-01-15';
SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08';
-- test with invalid shard placements
\c - postgres - :master_port
SET search_path to "Tenant Isolation";
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_1_port;
SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-08-08', shard_transfer_mode => 'force_logical');
UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE nodeport = :worker_1_port;
\c - mx_isolation_role_ent - :master_port
SET search_path to "Tenant Isolation";
DROP TABLE lineitem_date;
-- test on an append-distributed table; tenant isolation is only supported for
-- hash-distributed tables, so this should fail
CREATE TABLE test_append (
tenant_id integer
);
SELECT create_distributed_table('test_append', 'tenant_id', 'append');
SELECT isolate_tenant_to_new_shard('test_append', 100, shard_transfer_mode => 'force_logical');
-- check metadata for comparison
SELECT * FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass
ORDER BY shardminvalue::BIGINT, logicalrelid;
-- test failure scenarios with triggers on workers
\c - postgres - :worker_1_port
SET search_path to "Tenant Isolation";
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION abort_any_command()
RETURNS event_trigger
LANGUAGE plpgsql
AS $$
BEGIN
RAISE EXCEPTION 'command % is disabled', tg_tag;
END;
$$;
RESET citus.enable_metadata_sync;
CREATE EVENT TRIGGER abort_ddl ON ddl_command_end
EXECUTE PROCEDURE abort_any_command();
SET citus.override_table_visibility TO false;
\d
\c - mx_isolation_role_ent - :master_port
SET search_path to "Tenant Isolation";
\set VERBOSITY terse
SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'force_logical');
\set VERBOSITY default
\c - postgres - :worker_1_port
SET search_path to "Tenant Isolation";
SET citus.override_table_visibility TO false;
\d
DROP EVENT TRIGGER abort_ddl;
-- create a trigger for drops
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION abort_drop_command()
RETURNS event_trigger
LANGUAGE plpgsql
AS $$
BEGIN
RAISE EXCEPTION 'command % is disabled', tg_tag;
END;
$$;
RESET citus.enable_metadata_sync;
CREATE EVENT TRIGGER abort_drop ON sql_drop
EXECUTE PROCEDURE abort_drop_command();
\c - mx_isolation_role_ent - :master_port
SET search_path to "Tenant Isolation";
\set VERBOSITY terse
SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'force_logical');
\set VERBOSITY default
-- check if metadata is changed
SELECT * FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass
ORDER BY shardminvalue::BIGINT, logicalrelid;
\c - - - :worker_1_port
SET search_path to "Tenant Isolation";
-- however, new tables are already created
SET citus.override_table_visibility TO false;
\d
\c - postgres - :worker_1_port
DROP EVENT TRIGGER abort_drop;
\c - mx_isolation_role_ent - :master_port
SET search_path to "Tenant Isolation";
-- tests for cluster health
SELECT count(*) FROM lineitem_streaming;
SELECT count(*) FROM orders_streaming;
SELECT
l_orderkey,
sum(l_extendedprice * (1 - l_discount)) as revenue,
o_orderdate
FROM
orders_streaming,
lineitem_streaming
WHERE
l_orderkey = o_orderkey
GROUP BY
l_orderkey,
o_orderdate
ORDER BY
revenue DESC,
o_orderdate;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 99;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 100;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 102;
SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 103;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 99;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 100;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 101;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 102;
SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103;
-- test composite types with tenant isolation
SET search_path to default;
\c - postgres - :worker_1_port
SET search_path to "Tenant Isolation", public, pg_catalog;
-- ... create a test HASH function. Though it is a poor hash function,
-- it is acceptable for our tests
SET citus.enable_metadata_sync TO OFF;
CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int
AS 'SELECT hashtext( ($1.i + $1.i2)::text);'
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
RESET citus.enable_metadata_sync;
CREATE OPERATOR CLASS cats_op_fam_class
DEFAULT FOR TYPE test_composite_type USING HASH AS
OPERATOR 1 = (test_composite_type, test_composite_type),
FUNCTION 1 test_composite_type_hash(test_composite_type);
\c - - - :worker_2_port
SET search_path to "Tenant Isolation", public, pg_catalog;
-- ... create a test HASH function. Though it is a poor hash function,
-- it is acceptable for our tests
SET citus.enable_metadata_sync TO OFF;
CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int
AS 'SELECT hashtext( ($1.i + $1.i2)::text);'
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
RESET citus.enable_metadata_sync;
CREATE OPERATOR CLASS cats_op_fam_class
DEFAULT FOR TYPE test_composite_type USING HASH AS
OPERATOR 1 = (test_composite_type, test_composite_type),
FUNCTION 1 test_composite_type_hash(test_composite_type);
\c - mx_isolation_role_ent - :master_port
SET search_path to "Tenant Isolation", public, pg_catalog;
CREATE TABLE composite_table (
composite_key test_composite_type);
SELECT create_distributed_table('composite_table', 'composite_key');
INSERT INTO composite_table VALUES ('(1, 2)'::test_composite_type);
INSERT INTO composite_table VALUES ('(1, 3)'::test_composite_type);
INSERT INTO composite_table VALUES ('(1, 4)'::test_composite_type);
SELECT isolate_tenant_to_new_shard('composite_table', '(1, 3)', shard_transfer_mode => 'force_logical');
SELECT count(*) FROM composite_table WHERE composite_key = '(1, 2)'::test_composite_type;
SELECT count(*) FROM composite_table WHERE composite_key = '(1, 3)'::test_composite_type;
SELECT count(*) FROM composite_table WHERE composite_key = '(1, 4)'::test_composite_type;
DROP TABLE composite_table;
-- create foreign keys from a reference and distributed table
-- to another distributed table
SET search_path to "Tenant Isolation", public, pg_catalog;
SET citus.shard_replication_factor TO 1;
SET citus.shard_count to 8;
CREATE TABLE test_reference_table_fkey(id int PRIMARY KEY);
SELECT create_reference_table('test_reference_table_fkey');
CREATE TABLE test_colocated_table_1(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES test_colocated_table_1(id));
SELECT create_distributed_table('test_colocated_table_1', 'id', colocate_with => 'NONE');
CREATE TABLE test_colocated_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id));
SELECT create_distributed_table('test_colocated_table_2', 'id', colocate_with => 'test_colocated_table_1');
CREATE TABLE test_colocated_table_3(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id), FOREIGN KEY(id) REFERENCES test_colocated_table_2(id));
SELECT create_distributed_table('test_colocated_table_3', 'id', colocate_with => 'test_colocated_table_1');
CREATE TABLE test_colocated_table_no_rep_identity(id int, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id), FOREIGN KEY(id) REFERENCES test_colocated_table_2(id));
SELECT create_distributed_table('test_colocated_table_no_rep_identity', 'id', colocate_with => 'test_colocated_table_1');
INSERT INTO test_reference_table_fkey SELECT i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_1 SELECT i, i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_2 SELECT i, i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_3 SELECT i, i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_no_rep_identity SELECT i, i FROM generate_series (0, 100) i;
-- show that we do not support tenant isolation when the table has a colocated
-- table with no replica identity and shard_transfer_mode => 'auto' (logical
-- replication cannot replicate UPDATEs/DELETEs without a replica identity)
SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE', shard_transfer_mode => 'auto');
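-- note: instead of dropping the table below, giving it a replica identity
-- would also be expected to unblock the nonblocking split; a sketch (not part
-- of the original flow):
--   ALTER TABLE test_colocated_table_no_rep_identity REPLICA IDENTITY FULL;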
-- show that we can isolate it after removing the colocated table with no replica identity
DROP TABLE test_colocated_table_no_rep_identity;
SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE', shard_transfer_mode => 'auto');
SELECT count(*) FROM test_colocated_table_2;
\c - postgres - :worker_1_port
-- show the foreign keys of the main table's shards and of its colocated tables' shards
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'test_colocated_table_%'
ORDER BY 1, 2;
\c - mx_isolation_role_ent - :master_port
SET search_path to "Tenant Isolation";
--
-- Make sure that isolate_tenant_to_new_shard() replicates reference tables
-- when replicate_reference_tables_on_activate is off.
--
CREATE TABLE ref_table(a int);
SELECT create_reference_table('ref_table');
\c - postgres - :master_port
SET search_path to "Tenant Isolation";
-- partitioning tests
-- create partitioned table
CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time);
-- create a regular partition
CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
-- create a columnar partition
CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01') USING columnar;
-- load some data and distribute tables
INSERT INTO partitioning_test VALUES (1, '2009-06-06');
INSERT INTO partitioning_test VALUES (2, '2010-07-07');
INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09');
INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03');
-- distribute partitioned table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('partitioning_test', 'id');
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'partitioning_test'::regclass;
SELECT count(*) FROM partitioning_test;
-- isolate a value into its own shard
SELECT 1 FROM isolate_tenant_to_new_shard('partitioning_test', 2, 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'partitioning_test'::regclass;
SELECT count(*) FROM partitioning_test;
SET citus.replicate_reference_tables_on_activate TO off;
SET client_min_messages TO WARNING;
SELECT 1 FROM master_add_node('localhost', :master_port, groupId=>0);
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
\c - mx_isolation_role_ent - :master_port
SET search_path to "Tenant Isolation";
SELECT 1 FROM isolate_tenant_to_new_shard('test_colocated_table_2', 2, 'CASCADE', shard_transfer_mode => 'force_logical');
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
\c - postgres - :master_port
SELECT 1 FROM master_remove_node('localhost', :master_port);
SET client_min_messages TO WARNING;
DROP SCHEMA "Tenant Isolation" CASCADE;
REVOKE ALL ON SCHEMA public FROM mx_isolation_role_ent;
DROP ROLE mx_isolation_role_ent;
-- stop metadata sync, resync, and then stop syncing again
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-- restart metadata sync for rest of the tests
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-- make sure there are no tables with non-zero colocationid
SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0;
TRUNCATE TABLE pg_catalog.pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;

View File

@@ -177,7 +177,7 @@ CREATE VIEW view_created_before_shard_moves AS
 SELECT * FROM view_created_before_shard_moves;
 -- show that tenant isolation works fine
-SELECT isolate_tenant_to_new_shard('test', 5, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('test', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
 -- in the first iteration, have an
 -- hybrid cluster meaning that