Drop defer drop GUCs (#6447)

DESCRIPTION: Drops GUC defer_drop_after_shard_split
DESCRIPTION: Drops GUC defer_drop_after_shard_move

Drop the GUCs and the related code.
Delete tests that were added specifically for the GUCs.
Keep tests that are still useful without the GUCs.
Update the affected test outputs.

The motivation for this PR is to have an "always deferring" mechanism.
These two GUCs provided an option to skip deferring the drop of objects
during a shard move/split and drop them immediately instead. With this PR,
we always defer dropping orphaned shards and other types of objects.
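
As a rough sketch of the resulting behavior (the shard id and the psql port
variables below are placeholders borrowed from the regression tests in this
diff), a shard move no longer drops the source placement right away: the old
placement is marked as orphaned and removed later by the background cleaner
(driven by citus.defer_shard_delete_interval), or immediately via an explicit
cleanup call:

    -- move a shard; the source placement is now always marked for deferred drop
    SELECT citus_move_shard_placement(8981000,
                                      'localhost', :worker_1_port,
                                      'localhost', :worker_2_port,
                                      shard_transfer_mode := 'force_logical');
    -- optionally force cleanup of orphaned placements right away
    CALL pg_catalog.citus_cleanup_orphaned_shards();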

A separate PR will extend the deferred cleanup operation so that we also
create deferred-drop records for subscriptions, publications, replication
slots, etc. This will let us keep track of objects that need to be dropped
during a shard move/split: objects created specifically for the current
operation will be dropped at the end of that operation.

We have an issue (a draft roadmap) for enabling parallel shard moves.
For details please see: https://github.com/citusdata/citus/issues/6437
Ahmet Gedemenli 2022-10-25 16:48:34 +03:00 committed by GitHub
parent 915d1b3b38
commit c379ff8614
32 changed files with 199 additions and 2181 deletions


@@ -46,9 +46,6 @@
 #include "distributed/shard_rebalancer.h"
 #include "postmaster/postmaster.h"
 
-/* declarations for dynamic loading */
-bool DeferShardDeleteOnSplit = true;
-
 /*
  * Entry for map that tracks ShardInterval -> Placement Node
  * created by split workflow.
@@ -159,7 +156,6 @@ static uint64 GetNextShardIdForSplitChild(void);
 static void AcquireNonblockingSplitLock(Oid relationId);
 static List * GetWorkerNodesFromWorkerIds(List *nodeIdsForPlacementList);
 static void DropShardListMetadata(List *shardIntervalList);
-static void DropShardList(List *shardIntervalList);
 static void InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList);
 
 /* Customize error message strings based on operation type */
@@ -633,26 +629,15 @@ BlockingShardSplit(SplitOperation splitOperation,
    /*
-    * Delete old shards metadata and either mark the shards as
-    * to be deferred drop or physically delete them.
+    * Delete old shards metadata and mark the shards as to be deferred drop.
     * Have to do that before creating the new shard metadata,
     * because there's cross-checks preventing inconsistent metadata
     * (like overlapping shards).
     */
-   if (DeferShardDeleteOnSplit)
-   {
-       ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s",
-                            operationName)));
-       InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList);
-   }
-   else
-   {
-       ereport(LOG, (errmsg("performing cleanup of source shard(s) for %s",
-                            operationName)));
-       DropShardList(sourceColocatedShardIntervalList);
-   }
+   ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s",
+                        operationName)));
+   InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList);
 
    DropShardListMetadata(sourceColocatedShardIntervalList);
@@ -670,7 +655,7 @@ BlockingShardSplit(SplitOperation splitOperation,
    /*
     * Create foreign keys if exists after the metadata changes happening in
-    * DropShardList() and InsertSplitChildrenShardMetadata() because the foreign
+    * InsertSplitChildrenShardMetadata() because the foreign
     * key creation depends on the new metadata.
     */
    CreateForeignKeyConstraints(shardGroupSplitIntervalListList,
@@ -1393,54 +1378,8 @@ DropShardListMetadata(List *shardIntervalList)
 /*
- * DropShardList drops actual shards from the worker nodes.
- */
-static void
-DropShardList(List *shardIntervalList)
-{
-   ListCell *shardIntervalCell = NULL;
-
-   foreach(shardIntervalCell, shardIntervalList)
-   {
-       ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
-       ListCell *shardPlacementCell = NULL;
-       uint64 oldShardId = shardInterval->shardId;
-
-       /* delete shard placements */
-       List *shardPlacementList = ActiveShardPlacementList(oldShardId);
-       foreach(shardPlacementCell, shardPlacementList)
-       {
-           ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell);
-           char *workerName = placement->nodeName;
-           uint32 workerPort = placement->nodePort;
-           StringInfo dropQuery = makeStringInfo();
-
-           /* get shard name */
-           char *qualifiedShardName = ConstructQualifiedShardName(shardInterval);
-
-           char storageType = shardInterval->storageType;
-           if (storageType == SHARD_STORAGE_TABLE)
-           {
-               appendStringInfo(dropQuery, DROP_REGULAR_TABLE_COMMAND,
-                                qualifiedShardName);
-           }
-           else if (storageType == SHARD_STORAGE_FOREIGN)
-           {
-               appendStringInfo(dropQuery, DROP_FOREIGN_TABLE_COMMAND,
-                                qualifiedShardName);
-           }
-
-           /* drop old shard */
-           SendCommandToWorker(workerName, workerPort, dropQuery->data);
-       }
-   }
-}
-
-/*
- * If deferred drop is enabled, insert deferred cleanup records instead of
- * dropping actual shards from the worker nodes. The shards will be dropped
- * by background cleaner later.
+ * Insert deferred cleanup records.
+ * The shards will be dropped by background cleaner later.
  */
 static void
 InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList)
@@ -1698,26 +1637,15 @@ NonBlockingShardSplit(SplitOperation splitOperation,
                                              SHARD_SPLIT);
 
    /*
-    * 10) Delete old shards metadata and either mark the shards as
-    * to be deferred drop or physically delete them.
+    * 10) Delete old shards metadata and mark the shards as to be deferred drop.
     * Have to do that before creating the new shard metadata,
     * because there's cross-checks preventing inconsistent metadata
     * (like overlapping shards).
     */
-   if (DeferShardDeleteOnSplit)
-   {
-       ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s",
-                            operationName)));
-       InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList);
-   }
-   else
-   {
-       ereport(LOG, (errmsg("performing cleanup of source shard(s) for %s",
-                            operationName)));
-       DropShardList(sourceColocatedShardIntervalList);
-   }
+   ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s",
+                        operationName)));
+   InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList);
 
    DropShardListMetadata(sourceColocatedShardIntervalList);
@@ -1769,7 +1697,7 @@ NonBlockingShardSplit(SplitOperation splitOperation,
    /*
     * 14) Create foreign keys if exists after the metadata changes happening in
-    * DropShardList() and InsertSplitChildrenShardMetadata() because the foreign
+    * InsertSplitChildrenShardMetadata() because the foreign
     * key creation depends on the new metadata.
     */
    CreateUncheckedForeignKeyConstraints(logicalRepTargetList);


@@ -101,10 +101,9 @@ static List * RecreateTableDDLCommandList(Oid relationId);
 static void EnsureTableListOwner(List *tableIdList);
 static void EnsureTableListSuitableForReplication(List *tableIdList);
-static void DropColocatedShardPlacement(ShardInterval *shardInterval, char *nodeName,
-                                        int32 nodePort);
 static void MarkForDropColocatedShardPlacement(ShardInterval *shardInterval,
-                                               char *nodeName, int32 nodePort);
+                                               char *nodeName,
+                                               int32 nodePort);
 static void UpdateColocatedShardPlacementMetadataOnWorkers(int64 shardId,
                                                            char *sourceNodeName,
                                                            int32 sourceNodePort,
@@ -138,8 +137,6 @@ PG_FUNCTION_INFO_V1(master_copy_shard_placement);
 PG_FUNCTION_INFO_V1(citus_move_shard_placement);
 PG_FUNCTION_INFO_V1(master_move_shard_placement);
 
-bool DeferShardDeleteOnMove = true;
-
 double DesiredPercentFreeAfterMove = 10;
 bool CheckAvailableSpaceBeforeMove = true;
@@ -402,14 +399,7 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
    }
 
    /* since this is move operation, we remove shards from source node after copy */
-   if (DeferShardDeleteOnMove)
-   {
-       MarkForDropColocatedShardPlacement(shardInterval, sourceNodeName, sourceNodePort);
-   }
-   else
-   {
-       DropColocatedShardPlacement(shardInterval, sourceNodeName, sourceNodePort);
-   }
+   MarkForDropColocatedShardPlacement(shardInterval, sourceNodeName, sourceNodePort);
 
    UpdateColocatedShardPlacementMetadataOnWorkers(shardId, sourceNodeName,
                                                   sourceNodePort, targetNodeName,
@@ -1872,44 +1862,14 @@ RecreateTableDDLCommandList(Oid relationId)
 }
 
-/*
- * DropColocatedShardPlacement deletes the shard placement metadata for the given shard
- * placement from the pg_dist_placement, and then it drops the shard table
- * from the given node. The function does this for all colocated placements.
- */
-static void
-DropColocatedShardPlacement(ShardInterval *shardInterval, char *nodeName, int32 nodePort)
-{
-   List *colocatedShardList = ColocatedShardIntervalList(shardInterval);
-   ListCell *colocatedShardCell = NULL;
-
-   foreach(colocatedShardCell, colocatedShardList)
-   {
-       ShardInterval *colocatedShard = (ShardInterval *) lfirst(colocatedShardCell);
-       char *qualifiedTableName = ConstructQualifiedShardName(colocatedShard);
-       StringInfo dropQuery = makeStringInfo();
-       uint64 shardId = colocatedShard->shardId;
-       List *shardPlacementList =
-           ShardPlacementListIncludingOrphanedPlacements(shardId);
-       ShardPlacement *placement =
-           SearchShardPlacementInListOrError(shardPlacementList, nodeName, nodePort);
-
-       appendStringInfo(dropQuery, DROP_REGULAR_TABLE_COMMAND, qualifiedTableName);
-
-       DeleteShardPlacementRow(placement->placementId);
-       SendCommandToWorker(nodeName, nodePort, dropQuery->data);
-   }
-}
-
 /*
  * MarkForDropColocatedShardPlacement marks the shard placement metadata for
  * the given shard placement to be deleted in pg_dist_placement. The function
  * does this for all colocated placements.
  */
 static void
-MarkForDropColocatedShardPlacement(ShardInterval *shardInterval, char *nodeName, int32
-                                   nodePort)
+MarkForDropColocatedShardPlacement(ShardInterval *shardInterval, char *nodeName,
+                                   int32 nodePort)
 {
    List *colocatedShardList = ColocatedShardIntervalList(shardInterval);
    ListCell *colocatedShardCell = NULL;


@@ -148,6 +148,8 @@ DEFINE_COLUMNAR_PASSTHROUGH_FUNC(test_columnar_storage_write_new_page)
 static char *CitusVersion = CITUS_VERSION;
 static char *DeprecatedEmptyString = "";
 static char *MitmfifoEmptyString = "";
+static bool DeprecatedDeferShardDeleteOnMove = true;
+static bool DeprecatedDeferShardDeleteOnSplit = true;
 
 /* deprecated GUC value that should not be used anywhere outside this file */
 static int ReplicationModel = REPLICATION_MODEL_STREAMING;
@@ -1010,18 +1012,9 @@ RegisterCitusConfigVariables(void)
    DefineCustomBoolVariable(
        "citus.defer_drop_after_shard_move",
-       gettext_noop("When enabled a shard move will mark the original shards "
-                    "for deletion after a successful move, instead of deleting "
-                    "them right away."),
-       gettext_noop("The deletion of a shard can sometimes run into a conflict with a "
-                    "long running transactions on a the shard during the drop phase of "
-                    "the shard move. This causes some moves to be rolled back after "
-                    "resources have been spend on moving the shard. To prevent "
-                    "conflicts this feature lets you skip the actual deletion till a "
-                    "later point in time. When used one should set "
-                    "citus.defer_shard_delete_interval to make sure defered deletions "
-                    "will be executed"),
-       &DeferShardDeleteOnMove,
+       gettext_noop("Deprecated, Citus always defers drop after shard move"),
+       NULL,
+       &DeprecatedDeferShardDeleteOnMove,
        true,
        PGC_USERSET,
        0,
@@ -1029,18 +1022,9 @@ RegisterCitusConfigVariables(void)
    DefineCustomBoolVariable(
        "citus.defer_drop_after_shard_split",
-       gettext_noop("When enabled a shard split will mark the original shards "
-                    "for deletion after a successful split, instead of deleting "
-                    "them right away."),
-       gettext_noop("The deletion of a shard can sometimes run into a conflict with a "
-                    "long running transactions on a the shard during the drop phase of "
-                    "the shard split. This causes some splits to be rolled back after "
-                    "resources have been spend on moving the shard. To prevent "
-                    "conflicts this feature lets you skip the actual deletion till a "
-                    "later point in time. When used one should set "
-                    "citus.defer_shard_delete_interval to make sure defered deletions "
-                    "will be executed"),
-       &DeferShardDeleteOnSplit,
+       gettext_noop("Deprecated, Citus always defers drop after shard split"),
+       NULL,
+       &DeprecatedDeferShardDeleteOnSplit,
        true,
        PGC_USERSET,
        0,


@@ -14,11 +14,9 @@
 /* GUC to configure deferred shard deletion */
 extern int DeferShardDeleteInterval;
 extern int BackgroundTaskQueueCheckInterval;
-extern bool DeferShardDeleteOnMove;
 extern double DesiredPercentFreeAfterMove;
 extern bool CheckAvailableSpaceBeforeMove;
-extern bool DeferShardDeleteOnSplit;
 
 extern int NextOperationId;
 extern int NextCleanupRecordId;


@@ -41,7 +41,6 @@ INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FR
 SET ROLE test_split_role;
 SET search_path TO "citus_split_test_schema";
 SET citus.next_shard_id TO 8981007;
-SET citus.defer_drop_after_shard_move TO OFF;
 SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
  citus_move_shard_placement
 ---------------------------------------------------------------------


@@ -31,7 +31,6 @@ INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FR
 \c - postgres - :master_port
 SET search_path TO "citus_split_test_schema";
 SET citus.next_shard_id TO 8981007;
-SET citus.defer_drop_after_shard_move TO OFF;
 -- BEGIN : Set node id variables
 SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
 SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset


@@ -211,7 +211,6 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node
 SET ROLE test_shard_split_role;
 SET search_path TO "citus_split_test_schema";
 SET citus.next_shard_id TO 8981007;
-SET citus.defer_drop_after_shard_move TO OFF;
 SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
  citus_move_shard_placement
 ---------------------------------------------------------------------
@@ -219,6 +218,8 @@ SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localho
 (1 row)
 
 -- END : Move one shard before we split it.
+CALL pg_catalog.citus_cleanup_orphaned_shards();
+NOTICE: cleaned up 3 orphaned shards
 -- BEGIN : Set node id variables
 SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
 SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
@@ -263,6 +264,8 @@ SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localho
 (1 row)
 
 -- END : Move a shard post split.
+CALL pg_catalog.citus_cleanup_orphaned_shards();
+NOTICE: cleaned up 3 orphaned shards
 -- BEGIN : Display current state.
 SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
 FROM pg_dist_shard AS shard


@@ -1,565 +0,0 @@
/*
This suite runs without deferred drop enabled.
Citus Shard Split Test.The test is model similar to 'shard_move_constraints'.
Here is a high level overview of test plan:
1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table.
2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors.
3. Create Foreign key constraints between the two co-located distributed tables.
4. Load data into the three tables.
5. Move one of the shards for 'sensors' to test ShardMove -> Split.
6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
7. Move one of the split shard to test Split -> ShardMove.
8. Split an already split shard second time on a different schema.
9. Create a colocated table with no replica identity.
10. Show we do not allow Split with the shard transfer mode 'auto' if any colocated table has no replica identity.
11. Drop the colocated table with no replica identity.
12. Show we allow Split with the shard transfer mode 'auto' if all colocated tables has replica identity.
*/
CREATE SCHEMA "citus_split_test_schema_no_deferred_drop";
SET citus.defer_drop_after_shard_split TO OFF;
CREATE ROLE test_shard_split_role_nodeferred_drop WITH LOGIN;
GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema_no_deferred_drop" TO test_shard_split_role_nodeferred_drop;
SET ROLE test_shard_split_role_nodeferred_drop;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SET citus.next_shard_id TO 8981000;
SET citus.next_placement_id TO 8610000;
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc.
CREATE TABLE sensors(
measureid integer,
eventdatetime date,
measure_data jsonb,
meaure_quantity decimal(15, 2),
measure_status char(1),
measure_comment varchar(44),
PRIMARY KEY (measureid, eventdatetime, measure_data));
CREATE INDEX index_on_sensors ON sensors(lower(measureid::text));
ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000;
CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed'));
CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status);
CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors;
SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc.
-- BEGIN: Create co-located distributed and reference tables.
CREATE TABLE reference_table (measureid integer PRIMARY KEY);
SELECT create_reference_table('reference_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY);
CLUSTER colocated_dist_table USING colocated_dist_table_pkey;
SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE table_with_index_rep_identity(key int NOT NULL);
CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key);
ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx;
CLUSTER table_with_index_rep_identity USING uqx;
SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- END: Create co-located distributed and reference tables.
-- BEGIN : Create Foreign key constraints.
ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid);
-- END : Create Foreign key constraints.
-- BEGIN : Load data into tables.
INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
SELECT COUNT(*) FROM sensors;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM reference_table;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM colocated_dist_table;
count
---------------------------------------------------------------------
1001
(1 row)
-- END: Load data into tables.
-- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
8981000 | sensors | -2147483648 | -1 | localhost | 57637
8981001 | sensors | 0 | 2147483647 | localhost | 57638
8981003 | colocated_dist_table | -2147483648 | -1 | localhost | 57637
8981004 | colocated_dist_table | 0 | 2147483647 | localhost | 57638
8981005 | table_with_index_rep_identity | -2147483648 | -1 | localhost | 57637
8981006 | table_with_index_rep_identity | 0 | 2147483647 | localhost | 57638
(6 rows)
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981000 | fkey_table_to_dist_8981000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981003(measureid)
(1 row)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981000 | CREATE INDEX hash_index_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981000 | CREATE INDEX index_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (lower((measureid)::text))
sensors_8981000 | CREATE INDEX index_with_include_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981000 | CREATE UNIQUE INDEX sensors_pkey_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (measureid, eventdatetime, measure_data)
(4 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981005 | CREATE UNIQUE INDEX uqx_8981005 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981005 USING btree (key)
(1 row)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981000
(2 rows)
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981001 | fkey_table_to_dist_8981001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981004(measureid)
(1 row)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981001 | CREATE INDEX hash_index_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981001 | CREATE INDEX index_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (lower((measureid)::text))
sensors_8981001 | CREATE INDEX index_with_include_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981001 | CREATE UNIQUE INDEX sensors_pkey_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (measureid, eventdatetime, measure_data)
(4 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981006 | CREATE UNIQUE INDEX uqx_8981006 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981006 USING btree (key)
(1 row)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981001
(2 rows)
-- END : Display current state
-- BEGIN : Move one shard before we split it.
\c - postgres - :master_port
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE test_shard_split_role_nodeferred_drop;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SET citus.next_shard_id TO 8981007;
SET citus.defer_drop_after_shard_move TO OFF;
SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
citus_move_shard_placement
---------------------------------------------------------------------
(1 row)
-- END : Move one shard before we split it.
-- BEGIN : Set node id variables
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-- END : Set node id variables
-- BEGIN : Split two shards : One with move and One without move.
-- Perform 2 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981000,
ARRAY['-1073741824'],
ARRAY[:worker_1_node, :worker_2_node],
'force_logical');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
-- Perform 3 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981001,
ARRAY['536870911', '1610612735'],
ARRAY[:worker_1_node, :worker_1_node, :worker_2_node],
'force_logical');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
-- END : Split two shards : One with move and One without move.
-- BEGIN : Move a shard post split.
SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes');
citus_move_shard_placement
---------------------------------------------------------------------
(1 row)
-- END : Move a shard post split.
-- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
8981007 | sensors | -2147483648 | -1073741824 | localhost | 57638
8981008 | sensors | -1073741823 | -1 | localhost | 57638
8981013 | sensors | 0 | 536870911 | localhost | 57637
8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
8981009 | colocated_dist_table | -2147483648 | -1073741824 | localhost | 57638
8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
8981011 | table_with_index_rep_identity | -2147483648 | -1073741824 | localhost | 57638
8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
(15 rows)
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981013 | fkey_table_to_dist_8981013 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981016(measureid)
sensors_8981014 | fkey_table_to_dist_8981014 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981017(measureid)
(2 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981013 | CREATE INDEX hash_index_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981013 | CREATE INDEX index_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (lower((measureid)::text))
sensors_8981013 | CREATE INDEX index_with_include_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981013 | CREATE UNIQUE INDEX sensors_pkey_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (measureid, eventdatetime, measure_data)
sensors_8981014 | CREATE INDEX hash_index_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981014 | CREATE INDEX index_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (lower((measureid)::text))
sensors_8981014 | CREATE INDEX index_with_include_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981014 | CREATE UNIQUE INDEX sensors_pkey_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (measureid, eventdatetime, measure_data)
(8 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981019 | CREATE UNIQUE INDEX uqx_8981019 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981019 USING btree (key)
table_with_index_rep_identity_8981020 | CREATE UNIQUE INDEX uqx_8981020 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981020 USING btree (key)
(2 rows)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981013
stats_on_sensors_8981014
(3 rows)
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981007 | fkey_table_to_dist_8981007 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981009(measureid)
sensors_8981008 | fkey_table_to_dist_8981008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981010(measureid)
sensors_8981015 | fkey_table_to_dist_8981015 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981018(measureid)
(3 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981007 | CREATE INDEX hash_index_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981007 | CREATE INDEX index_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (lower((measureid)::text))
sensors_8981007 | CREATE INDEX index_with_include_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981007 | CREATE UNIQUE INDEX sensors_pkey_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (measureid, eventdatetime, measure_data)
sensors_8981008 | CREATE INDEX hash_index_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981008 | CREATE INDEX index_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (lower((measureid)::text))
sensors_8981008 | CREATE INDEX index_with_include_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981008 | CREATE UNIQUE INDEX sensors_pkey_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (measureid, eventdatetime, measure_data)
sensors_8981015 | CREATE INDEX hash_index_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981015 | CREATE INDEX index_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (lower((measureid)::text))
sensors_8981015 | CREATE INDEX index_with_include_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981015 | CREATE UNIQUE INDEX sensors_pkey_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (measureid, eventdatetime, measure_data)
(12 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981011 | CREATE UNIQUE INDEX uqx_8981011 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981011 USING btree (key)
table_with_index_rep_identity_8981012 | CREATE UNIQUE INDEX uqx_8981012 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981012 USING btree (key)
table_with_index_rep_identity_8981021 | CREATE UNIQUE INDEX uqx_8981021 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981021 USING btree (key)
(3 rows)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981007
stats_on_sensors_8981008
stats_on_sensors_8981015
(4 rows)
-- END : Display current state
-- BEGIN: Should be able to change/drop constraints
\c - postgres - :master_port
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE test_shard_split_role_nodeferred_drop;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed;
ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200;
DROP STATISTICS stats_on_sensors;
DROP INDEX index_on_sensors_renamed;
ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist;
-- END: Should be able to change/drop constraints
-- BEGIN: Split second time on another schema
SET search_path TO public;
SET citus.next_shard_id TO 8981031;
SELECT pg_catalog.citus_split_shard_by_split_points(
8981007,
ARRAY['-2100000000'],
ARRAY[:worker_1_node, :worker_2_node],
'force_logical');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637
8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638
8981008 | sensors | -1073741823 | -1 | localhost | 57638
8981013 | sensors | 0 | 536870911 | localhost | 57637
8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637
8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638
8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637
8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638
8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
(18 rows)
-- END: Split second time on another schema
-- BEGIN: Create a co-located table with no replica identity.
CREATE TABLE table_no_rep_id (measureid integer);
SELECT create_distributed_table('table_no_rep_id', 'measureid', colocate_with:='sensors');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- END: Create a co-located table with no replica identity.
-- BEGIN: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity
SET citus.next_shard_id TO 8981041;
SELECT pg_catalog.citus_split_shard_by_split_points(
8981031,
ARRAY['-2120000000'],
ARRAY[:worker_1_node, :worker_2_node]);
ERROR: cannot use logical replication to transfer shards of the relation table_no_rep_id since it doesn't have a REPLICA IDENTITY or PRIMARY KEY
DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY.
HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637
8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638
8981008 | sensors | -1073741823 | -1 | localhost | 57638
8981013 | sensors | 0 | 536870911 | localhost | 57637
8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637
8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638
8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637
8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638
8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
(18 rows)
-- END: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity
-- BEGIN: Drop the co-located table with no replica identity.
DROP TABLE table_no_rep_id;
-- END: Drop the co-located table with no replica identity.
-- BEGIN: Split a shard with shard_transfer_mode='auto' and with all colocated tables has replica identity
SET citus.next_shard_id TO 8981041;
SELECT pg_catalog.citus_split_shard_by_split_points(
8981031,
ARRAY['-2120000000'],
ARRAY[:worker_1_node, :worker_2_node],
'auto');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
8981041 | sensors | -2147483648 | -2120000000 | localhost | 57637
8981042 | sensors | -2119999999 | -2100000000 | localhost | 57638
8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638
8981008 | sensors | -1073741823 | -1 | localhost | 57638
8981013 | sensors | 0 | 536870911 | localhost | 57637
8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
8981043 | colocated_dist_table | -2147483648 | -2120000000 | localhost | 57637
8981044 | colocated_dist_table | -2119999999 | -2100000000 | localhost | 57638
8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638
8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
8981045 | table_with_index_rep_identity | -2147483648 | -2120000000 | localhost | 57637
8981046 | table_with_index_rep_identity | -2119999999 | -2100000000 | localhost | 57638
8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638
8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
(21 rows)
-- END: Split a shard with shard_transfer_mode='auto' and with all colocated tables has replica identity
-- BEGIN: Validate Data Count
SELECT COUNT(*) FROM sensors;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM reference_table;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM colocated_dist_table;
count
---------------------------------------------------------------------
1001
(1 row)
-- END: Validate Data Count
--BEGIN : Cleanup
\c - postgres - :master_port
DROP SCHEMA "citus_split_test_schema_no_deferred_drop" CASCADE;
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table citus_split_test_schema_no_deferred_drop.sensors
drop cascades to table citus_split_test_schema_no_deferred_drop.reference_table
drop cascades to table citus_split_test_schema_no_deferred_drop.colocated_dist_table
drop cascades to table citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity
SET citus.defer_drop_after_shard_split TO ON;
--END : Cleanup


@@ -207,7 +207,6 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node
 SET ROLE test_split_role;
 SET search_path TO "citus_split_test_schema";
 SET citus.next_shard_id TO 8981007;
-SET citus.defer_drop_after_shard_move TO OFF;
 SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
  citus_move_shard_placement
 ---------------------------------------------------------------------
@@ -215,6 +214,8 @@ SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localho
 (1 row)
 
 -- END : Move one shard before we split it.
+CALL citus_cleanup_orphaned_shards();
+NOTICE: cleaned up 3 orphaned shards
 -- BEGIN : Set node id variables
 SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
 SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
@@ -259,6 +260,8 @@ SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localho
 (1 row)
 
 -- END : Move a shard post split.
+CALL citus_cleanup_orphaned_shards();
+NOTICE: cleaned up 3 orphaned shards
 -- BEGIN : Display current state.
 SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
 FROM pg_dist_shard AS shard


@@ -1,464 +0,0 @@
/*
This suite runs without deferred drop enabled.
Citus Shard Split Test.The test is model similar to 'shard_move_constraints'.
Here is a high level overview of test plan:
1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table.
2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors.
3. Create Foreign key constraints between the two co-located distributed tables.
4. Load data into the three tables.
5. Move one of the shards for 'sensors' to test ShardMove -> Split.
6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
7. Move one of the split shard to test Split -> ShardMove.
8. Split an already split shard second time on a different schema.
*/
CREATE SCHEMA "citus_split_test_schema_no_deferred_drop";
SET citus.defer_drop_after_shard_split TO OFF;
CREATE ROLE test_split_deferred_role WITH LOGIN;
GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema_no_deferred_drop" TO test_split_deferred_role;
SET ROLE test_split_deferred_role;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SET citus.next_shard_id TO 8981000;
SET citus.next_placement_id TO 8610000;
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc.
CREATE TABLE sensors(
measureid integer,
eventdatetime date,
measure_data jsonb,
meaure_quantity decimal(15, 2),
measure_status char(1),
measure_comment varchar(44),
PRIMARY KEY (measureid, eventdatetime, measure_data));
CREATE INDEX index_on_sensors ON sensors(lower(measureid::text));
ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000;
CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed'));
CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status);
CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors;
SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc.
-- BEGIN: Create co-located distributed and reference tables.
CREATE TABLE reference_table (measureid integer PRIMARY KEY);
SELECT create_reference_table('reference_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY);
CLUSTER colocated_dist_table USING colocated_dist_table_pkey;
SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE table_with_index_rep_identity(key int NOT NULL);
CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key);
ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx;
CLUSTER table_with_index_rep_identity USING uqx;
SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- END: Create co-located distributed and reference tables.
-- BEGIN : Create Foreign key constraints.
ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid);
-- END : Create Foreign key constraints.
-- BEGIN : Load data into tables.
INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
SELECT COUNT(*) FROM sensors;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM reference_table;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM colocated_dist_table;
count
---------------------------------------------------------------------
1001
(1 row)
-- END: Load data into tables.
-- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
8981000 | sensors | -2147483648 | -1 | localhost | 57637
8981001 | sensors | 0 | 2147483647 | localhost | 57638
8981003 | colocated_dist_table | -2147483648 | -1 | localhost | 57637
8981004 | colocated_dist_table | 0 | 2147483647 | localhost | 57638
8981005 | table_with_index_rep_identity | -2147483648 | -1 | localhost | 57637
8981006 | table_with_index_rep_identity | 0 | 2147483647 | localhost | 57638
(6 rows)
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981000 | fkey_table_to_dist_8981000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981003(measureid)
(1 row)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981000 | CREATE INDEX hash_index_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981000 | CREATE INDEX index_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (lower((measureid)::text))
sensors_8981000 | CREATE INDEX index_with_include_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981000 | CREATE UNIQUE INDEX sensors_pkey_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (measureid, eventdatetime, measure_data)
(4 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981005 | CREATE UNIQUE INDEX uqx_8981005 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981005 USING btree (key)
(1 row)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981000
(2 rows)
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981001 | fkey_table_to_dist_8981001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981004(measureid)
(1 row)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981001 | CREATE INDEX hash_index_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981001 | CREATE INDEX index_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (lower((measureid)::text))
sensors_8981001 | CREATE INDEX index_with_include_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981001 | CREATE UNIQUE INDEX sensors_pkey_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (measureid, eventdatetime, measure_data)
(4 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981006 | CREATE UNIQUE INDEX uqx_8981006 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981006 USING btree (key)
(1 row)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981001
(2 rows)
-- END : Display current state
-- BEGIN : Move one shard before we split it.
\c - postgres - :master_port
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE test_split_deferred_role;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SET citus.next_shard_id TO 8981007;
SET citus.defer_drop_after_shard_move TO OFF;
SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
citus_move_shard_placement
---------------------------------------------------------------------
(1 row)
-- END : Move one shard before we split it.
-- BEGIN : Set node id variables
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-- END : Set node id variables
-- BEGIN : Split two shards : One with move and One without move.
-- Perform 2 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981000,
ARRAY['-1073741824'],
ARRAY[:worker_1_node, :worker_2_node],
'block_writes');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
-- Perform 3 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981001,
ARRAY['536870911', '1610612735'],
ARRAY[:worker_1_node, :worker_1_node, :worker_2_node],
'block_writes');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
-- END : Split two shards : One with move and One without move.
-- BEGIN : Move a shard post split.
SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes');
citus_move_shard_placement
---------------------------------------------------------------------
(1 row)
-- END : Move a shard post split.
-- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
8981007 | sensors | -2147483648 | -1073741824 | localhost | 57638
8981008 | sensors | -1073741823 | -1 | localhost | 57638
8981013 | sensors | 0 | 536870911 | localhost | 57637
8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
8981009 | colocated_dist_table | -2147483648 | -1073741824 | localhost | 57638
8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
8981011 | table_with_index_rep_identity | -2147483648 | -1073741824 | localhost | 57638
8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
(15 rows)
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981013 | fkey_table_to_dist_8981013 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981016(measureid)
sensors_8981014 | fkey_table_to_dist_8981014 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981017(measureid)
(2 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981013 | CREATE INDEX hash_index_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981013 | CREATE INDEX index_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (lower((measureid)::text))
sensors_8981013 | CREATE INDEX index_with_include_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981013 | CREATE UNIQUE INDEX sensors_pkey_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (measureid, eventdatetime, measure_data)
sensors_8981014 | CREATE INDEX hash_index_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981014 | CREATE INDEX index_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (lower((measureid)::text))
sensors_8981014 | CREATE INDEX index_with_include_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981014 | CREATE UNIQUE INDEX sensors_pkey_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (measureid, eventdatetime, measure_data)
(8 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981019 | CREATE UNIQUE INDEX uqx_8981019 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981019 USING btree (key)
table_with_index_rep_identity_8981020 | CREATE UNIQUE INDEX uqx_8981020 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981020 USING btree (key)
(2 rows)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981013
stats_on_sensors_8981014
(3 rows)
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981007 | fkey_table_to_dist_8981007 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981009(measureid)
sensors_8981008 | fkey_table_to_dist_8981008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981010(measureid)
sensors_8981015 | fkey_table_to_dist_8981015 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981018(measureid)
(3 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981007 | CREATE INDEX hash_index_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981007 | CREATE INDEX index_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (lower((measureid)::text))
sensors_8981007 | CREATE INDEX index_with_include_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981007 | CREATE UNIQUE INDEX sensors_pkey_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (measureid, eventdatetime, measure_data)
sensors_8981008 | CREATE INDEX hash_index_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981008 | CREATE INDEX index_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (lower((measureid)::text))
sensors_8981008 | CREATE INDEX index_with_include_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981008 | CREATE UNIQUE INDEX sensors_pkey_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (measureid, eventdatetime, measure_data)
sensors_8981015 | CREATE INDEX hash_index_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981015 | CREATE INDEX index_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (lower((measureid)::text))
sensors_8981015 | CREATE INDEX index_with_include_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981015 | CREATE UNIQUE INDEX sensors_pkey_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (measureid, eventdatetime, measure_data)
(12 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981011 | CREATE UNIQUE INDEX uqx_8981011 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981011 USING btree (key)
table_with_index_rep_identity_8981012 | CREATE UNIQUE INDEX uqx_8981012 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981012 USING btree (key)
table_with_index_rep_identity_8981021 | CREATE UNIQUE INDEX uqx_8981021 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981021 USING btree (key)
(3 rows)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981007
stats_on_sensors_8981008
stats_on_sensors_8981015
(4 rows)
-- END : Display current state
-- BEGIN: Should be able to change/drop constraints
\c - postgres - :master_port
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE test_split_deferred_role;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed;
ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200;
DROP STATISTICS stats_on_sensors;
DROP INDEX index_on_sensors_renamed;
ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist;
-- END: Should be able to change/drop constraints
-- BEGIN: Split second time on another schema
SET search_path TO public;
SET citus.next_shard_id TO 8981031;
SELECT pg_catalog.citus_split_shard_by_split_points(
8981007,
ARRAY['-2100000000'],
ARRAY[:worker_1_node, :worker_2_node],
'block_writes');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637
8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638
8981008 | sensors | -1073741823 | -1 | localhost | 57638
8981013 | sensors | 0 | 536870911 | localhost | 57637
8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637
8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638
8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637
8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638
8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
(18 rows)
-- END: Split second time on another schema
-- BEGIN: Validate Data Count
SELECT COUNT(*) FROM sensors;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM reference_table;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM colocated_dist_table;
count
---------------------------------------------------------------------
1001
(1 row)
-- END: Validate Data Count
--BEGIN : Cleanup
\c - postgres - :master_port
DROP SCHEMA "citus_split_test_schema_no_deferred_drop" CASCADE;
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table citus_split_test_schema_no_deferred_drop.sensors
drop cascades to table citus_split_test_schema_no_deferred_drop.reference_table
drop cascades to table citus_split_test_schema_no_deferred_drop.colocated_dist_table
drop cascades to table citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity
SET citus.defer_drop_after_shard_split TO ON;
--END : Cleanup
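For reference, a minimal sketch of the always-deferred cleanup flow that remains (the table name, shard id, and node ids below are hypothetical placeholders, not values from the test above): a split only marks the source shard placements as orphaned, and the actual drop happens later, either through the maintenance daemon or an explicit cleanup call.
SET citus.shard_replication_factor TO 1;
CREATE TABLE events (tenant_id int, payload jsonb);
SELECT create_distributed_table('events', 'tenant_id');
-- split one shard; the source placement is only marked for deferred drop
SELECT pg_catalog.citus_split_shard_by_split_points(
    102008,        -- hypothetical shardid, look it up in pg_dist_shard
    ARRAY['0'],
    ARRAY[1, 2],   -- hypothetical node ids from pg_dist_node
    'block_writes');
-- remove the orphaned source shard explicitly instead of waiting for the daemon
CALL pg_catalog.citus_cleanup_orphaned_resources();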

@ -159,48 +159,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
ERROR: canceling statement due to user request
-- failure on dropping old colocated shard
-- Disable deferred drop, otherwise we will skip the drop and the operation will succeed instead of failing.
SET citus.defer_drop_after_shard_split TO OFF;
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
ERROR: canceling statement due to user request
-- failure on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
ERROR: canceling statement due to user request
-- Re-enable deferred drop for rest of the tests.
SET citus.defer_drop_after_shard_split TO ON;
-- failure on foreign key creation
SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
mitmproxy

@ -311,49 +311,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").ca
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on dropping old shard
-- failure on dropping old colocated shard
-- Disable deferred drop, otherwise we will skip the drop and the operation will succeed instead of failing.
SET citus.defer_drop_after_shard_split TO OFF;
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- failure on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- cancellation on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
ERROR: canceling statement due to user request
-- Re-enable deferred drop for rest of the tests.
SET citus.defer_drop_after_shard_split TO ON;
-- failure on foreign key creation
SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
mitmproxy

@ -187,35 +187,3 @@ run_try_drop_marked_shards
step s1-commit:
COMMIT;
starting permutation: s1-begin s2-begin s2-select s1-move-placement-without-deferred s2-commit s1-commit
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-select:
SELECT COUNT(*) FROM t1;
count
---------------------------------------------------------------------
0
(1 row)
step s1-move-placement-without-deferred:
SET citus.defer_drop_after_shard_move TO OFF;
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-move-placement-without-deferred: <... completed>
master_move_shard_placement
---------------------------------------------------------------------
(1 row)
step s1-commit:
COMMIT;

@ -1560,7 +1560,6 @@ CREATE EXTENSION citus;
CREATE TABLE test (x int, y int);
INSERT INTO test VALUES (1,2);
SET citus.shard_replication_factor TO 1;
SET citus.defer_drop_after_shard_split TO off;
SELECT create_distributed_table_concurrently('test','x');
NOTICE: relation test does not have a REPLICA IDENTITY or PRIMARY KEY
DETAIL: UPDATE and DELETE commands on the relation will error out during create_distributed_table_concurrently unless there is a REPLICA IDENTITY or PRIMARY KEY. INSERT commands will still work.

@ -798,84 +798,6 @@ $$;
RESET citus.enable_metadata_sync;
CREATE EVENT TRIGGER abort_drop ON sql_drop
EXECUTE PROCEDURE abort_drop_command();
\c - postgres - :master_port
-- Disable deferred drop, otherwise we will skip the drop and the operation will succeed instead of failing.
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE mx_isolation_role_ent;
SET search_path to "Tenant Isolation";
\set VERBOSITY terse
SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'block_writes');
WARNING: command DROP TABLE is disabled
WARNING: command DROP TABLE is disabled
WARNING: command DROP TABLE is disabled
WARNING: command DROP TABLE is disabled
WARNING: command DROP TABLE is disabled
WARNING: command DROP TABLE is disabled
WARNING: failed to clean up 6 orphaned shards out of 6 after a isolate_tenant_to_new_shard operation failed
ERROR: command DROP TABLE is disabled
\set VERBOSITY default
-- check if metadata is changed
SELECT * FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass
ORDER BY shardminvalue::BIGINT, logicalrelid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
---------------------------------------------------------------------
lineitem_streaming | 1230040 | t | -2147483648 | -2147483648
orders_streaming | 1230042 | t | -2147483648 | -2147483648
lineitem_streaming | 1230041 | t | -2147483647 | -136164586
orders_streaming | 1230043 | t | -2147483647 | -136164586
lineitem_streaming | 1230035 | t | -136164585 | -136164585
orders_streaming | 1230038 | t | -136164585 | -136164585
lineitem_streaming | 1230036 | t | -136164584 | -85071815
orders_streaming | 1230039 | t | -136164584 | -85071815
lineitem_streaming | 1230011 | t | -85071814 | -85071814
orders_streaming | 1230014 | t | -85071814 | -85071814
lineitem_streaming | 1230012 | t | -85071813 | -1
orders_streaming | 1230015 | t | -85071813 | -1
lineitem_streaming | 1230004 | t | 0 | 108199380
orders_streaming | 1230007 | t | 0 | 108199380
lineitem_streaming | 1230005 | t | 108199381 | 108199381
orders_streaming | 1230008 | t | 108199381 | 108199381
lineitem_streaming | 1230028 | t | 108199382 | 412880111
orders_streaming | 1230031 | t | 108199382 | 412880111
lineitem_streaming | 1230029 | t | 412880112 | 412880112
orders_streaming | 1230032 | t | 412880112 | 412880112
lineitem_streaming | 1230044 | t | 412880113 | 2147483646
orders_streaming | 1230046 | t | 412880113 | 2147483646
lineitem_streaming | 1230045 | t | 2147483647 | 2147483647
orders_streaming | 1230047 | t | 2147483647 | 2147483647
(24 rows)
\c - - - :worker_1_port
SET search_path to "Tenant Isolation";
-- however, new tables are already created
SET citus.override_table_visibility TO false;
\d
List of relations
Schema | Name | Type | Owner
---------------------------------------------------------------------
Tenant Isolation | lineitem_streaming | table | mx_isolation_role_ent
Tenant Isolation | lineitem_streaming_1230011 | table | mx_isolation_role_ent
Tenant Isolation | lineitem_streaming_1230012 | table | mx_isolation_role_ent
Tenant Isolation | lineitem_streaming_1230035 | table | mx_isolation_role_ent
Tenant Isolation | lineitem_streaming_1230036 | table | mx_isolation_role_ent
Tenant Isolation | lineitem_streaming_1230040 | table | mx_isolation_role_ent
Tenant Isolation | lineitem_streaming_1230041 | table | mx_isolation_role_ent
Tenant Isolation | lineitem_streaming_1230056 | table | mx_isolation_role_ent
Tenant Isolation | lineitem_streaming_1230057 | table | mx_isolation_role_ent
Tenant Isolation | lineitem_streaming_1230058 | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming_1230014 | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming_1230015 | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming_1230038 | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming_1230039 | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming_1230042 | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming_1230043 | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming_1230059 | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming_1230060 | table | mx_isolation_role_ent
Tenant Isolation | orders_streaming_1230061 | table | mx_isolation_role_ent
(20 rows)
\c - postgres - :worker_1_port
DROP EVENT TRIGGER abort_drop;
\c - mx_isolation_role_ent - :master_port
@ -1085,7 +1007,7 @@ INSERT INTO test_colocated_table_3 SELECT i, i FROM generate_series (0, 100) i;
SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE', shard_transfer_mode => 'block_writes');
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1230095 1230089
(1 row)
SELECT count(*) FROM test_colocated_table_2;
@ -1096,7 +1018,7 @@ SELECT count(*) FROM test_colocated_table_2;
\c - postgres - :master_port
CALL pg_catalog.citus_cleanup_orphaned_resources();
NOTICE: cleaned up 10 orphaned resources NOTICE: cleaned up 4 orphaned resources
\c - postgres - :worker_1_port
-- show the foreign keys of the main table & its colocated shard on other tables
SELECT tbl.relname, fk."Constraint", fk."Definition"
@ -1107,47 +1029,47 @@ ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
test_colocated_table_1 | test_colocated_table_1_id_fkey | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1(id)
test_colocated_table_1_1230069 | test_colocated_table_1_id_fkey_1230069 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230069(id) test_colocated_table_1_1230063 | test_colocated_table_1_id_fkey_1230063 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230063(id)
test_colocated_table_1_1230071 | test_colocated_table_1_id_fkey_1230071 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230071(id) test_colocated_table_1_1230065 | test_colocated_table_1_id_fkey_1230065 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230065(id)
test_colocated_table_1_1230073 | test_colocated_table_1_id_fkey_1230073 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230073(id) test_colocated_table_1_1230067 | test_colocated_table_1_id_fkey_1230067 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230067(id)
test_colocated_table_1_1230091 | test_colocated_table_1_id_fkey_1230091 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230091(id) test_colocated_table_1_1230085 | test_colocated_table_1_id_fkey_1230085 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230085(id)
test_colocated_table_1_1230092 | test_colocated_table_1_id_fkey_1230092 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230092(id) test_colocated_table_1_1230086 | test_colocated_table_1_id_fkey_1230086 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230086(id)
test_colocated_table_1_1230093 | test_colocated_table_1_id_fkey_1230093 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230093(id) test_colocated_table_1_1230087 | test_colocated_table_1_id_fkey_1230087 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230087(id)
test_colocated_table_2 | test_colocated_table_2_id_fkey | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1(id)
test_colocated_table_2 | test_colocated_table_2_value_1_fkey | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey(id)
test_colocated_table_2_1230077 | test_colocated_table_2_id_fkey_1230077 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230069(id) test_colocated_table_2_1230071 | test_colocated_table_2_id_fkey_1230071 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230063(id)
test_colocated_table_2_1230077 | test_colocated_table_2_value_1_fkey_1230077 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_2_1230071 | test_colocated_table_2_value_1_fkey_1230071 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_2_1230079 | test_colocated_table_2_id_fkey_1230079 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230071(id) test_colocated_table_2_1230073 | test_colocated_table_2_id_fkey_1230073 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230065(id)
test_colocated_table_2_1230079 | test_colocated_table_2_value_1_fkey_1230079 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_2_1230073 | test_colocated_table_2_value_1_fkey_1230073 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_2_1230081 | test_colocated_table_2_id_fkey_1230081 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230073(id) test_colocated_table_2_1230075 | test_colocated_table_2_id_fkey_1230075 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230067(id)
test_colocated_table_2_1230081 | test_colocated_table_2_value_1_fkey_1230081 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_2_1230075 | test_colocated_table_2_value_1_fkey_1230075 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_2_1230094 | test_colocated_table_2_id_fkey_1230094 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230091(id) test_colocated_table_2_1230088 | test_colocated_table_2_id_fkey_1230088 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230085(id)
test_colocated_table_2_1230094 | test_colocated_table_2_value_1_fkey_1230094 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_2_1230088 | test_colocated_table_2_value_1_fkey_1230088 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_2_1230095 | test_colocated_table_2_id_fkey_1230095 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230092(id) test_colocated_table_2_1230089 | test_colocated_table_2_id_fkey_1230089 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230086(id)
test_colocated_table_2_1230095 | test_colocated_table_2_value_1_fkey_1230095 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_2_1230089 | test_colocated_table_2_value_1_fkey_1230089 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_2_1230096 | test_colocated_table_2_id_fkey_1230096 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230093(id) test_colocated_table_2_1230090 | test_colocated_table_2_id_fkey_1230090 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230087(id)
test_colocated_table_2_1230096 | test_colocated_table_2_value_1_fkey_1230096 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_2_1230090 | test_colocated_table_2_value_1_fkey_1230090 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_3 | test_colocated_table_3_id_fkey | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1(id)
test_colocated_table_3 | test_colocated_table_3_id_fkey1 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2(id)
test_colocated_table_3 | test_colocated_table_3_value_1_fkey | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey(id)
test_colocated_table_3_1230085 | test_colocated_table_3_id_fkey1_1230085 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230077(id) test_colocated_table_3_1230079 | test_colocated_table_3_id_fkey1_1230079 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230071(id)
test_colocated_table_3_1230085 | test_colocated_table_3_id_fkey_1230085 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230069(id) test_colocated_table_3_1230079 | test_colocated_table_3_id_fkey_1230079 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230063(id)
test_colocated_table_3_1230085 | test_colocated_table_3_value_1_fkey_1230085 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_3_1230079 | test_colocated_table_3_value_1_fkey_1230079 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_3_1230087 | test_colocated_table_3_id_fkey1_1230087 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230079(id) test_colocated_table_3_1230081 | test_colocated_table_3_id_fkey1_1230081 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230073(id)
test_colocated_table_3_1230087 | test_colocated_table_3_id_fkey_1230087 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230071(id) test_colocated_table_3_1230081 | test_colocated_table_3_id_fkey_1230081 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230065(id)
test_colocated_table_3_1230087 | test_colocated_table_3_value_1_fkey_1230087 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_3_1230081 | test_colocated_table_3_value_1_fkey_1230081 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_3_1230089 | test_colocated_table_3_id_fkey1_1230089 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230081(id) test_colocated_table_3_1230083 | test_colocated_table_3_id_fkey1_1230083 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230075(id)
test_colocated_table_3_1230089 | test_colocated_table_3_id_fkey_1230089 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230073(id) test_colocated_table_3_1230083 | test_colocated_table_3_id_fkey_1230083 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230067(id)
test_colocated_table_3_1230089 | test_colocated_table_3_value_1_fkey_1230089 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_3_1230083 | test_colocated_table_3_value_1_fkey_1230083 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_3_1230097 | test_colocated_table_3_id_fkey1_1230097 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230094(id) test_colocated_table_3_1230091 | test_colocated_table_3_id_fkey1_1230091 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230088(id)
test_colocated_table_3_1230097 | test_colocated_table_3_id_fkey_1230097 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230091(id) test_colocated_table_3_1230091 | test_colocated_table_3_id_fkey_1230091 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230085(id)
test_colocated_table_3_1230097 | test_colocated_table_3_value_1_fkey_1230097 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_3_1230091 | test_colocated_table_3_value_1_fkey_1230091 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_3_1230098 | test_colocated_table_3_id_fkey1_1230098 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230095(id) test_colocated_table_3_1230092 | test_colocated_table_3_id_fkey1_1230092 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230089(id)
test_colocated_table_3_1230098 | test_colocated_table_3_id_fkey_1230098 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230092(id) test_colocated_table_3_1230092 | test_colocated_table_3_id_fkey_1230092 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230086(id)
test_colocated_table_3_1230098 | test_colocated_table_3_value_1_fkey_1230098 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_3_1230092 | test_colocated_table_3_value_1_fkey_1230092 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
test_colocated_table_3_1230099 | test_colocated_table_3_id_fkey1_1230099 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230096(id) test_colocated_table_3_1230093 | test_colocated_table_3_id_fkey1_1230093 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230090(id)
test_colocated_table_3_1230099 | test_colocated_table_3_id_fkey_1230099 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230093(id) test_colocated_table_3_1230093 | test_colocated_table_3_id_fkey_1230093 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230087(id)
test_colocated_table_3_1230099 | test_colocated_table_3_value_1_fkey_1230099 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) test_colocated_table_3_1230093 | test_colocated_table_3_value_1_fkey_1230093 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id)
(42 rows)
\c - mx_isolation_role_ent - :master_port

@ -363,7 +363,6 @@ SELECT COUNT(*) FROM pg_dist_transaction;
(1 row)
BEGIN;
SET LOCAL citus.defer_drop_after_shard_move TO OFF;
SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode := 'block_writes');
citus_move_shard_placement
---------------------------------------------------------------------

@ -4,7 +4,6 @@
SET citus.next_shard_id TO 20000000;
SET citus.shard_count TO 6;
SET citus.shard_replication_factor TO 1;
SET citus.defer_drop_after_shard_move TO on;
CREATE SCHEMA shard_move_deferred_delete;
SET search_path TO shard_move_deferred_delete;
CREATE TABLE t1 ( id int PRIMARY KEY);

@ -65,12 +65,6 @@ step "s1-move-placement-back"
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637);
}
step "s1-move-placement-without-deferred" {
SET citus.defer_drop_after_shard_move TO OFF;
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
}
step "s1-drop-marked-shards" step "s1-drop-marked-shards"
{ {
SET client_min_messages to NOTICE; SET client_min_messages to NOTICE;
@ -88,10 +82,6 @@ step "s1-commit"
session "s2" session "s2"
step "s2-begin" {
BEGIN;
}
step "s2-drop-old-shards" { step "s2-drop-old-shards" {
SELECT run_try_drop_marked_shards(); SELECT run_try_drop_marked_shards();
} }
@ -112,20 +102,12 @@ step "s2-lock-table-on-worker"
SELECT run_commands_on_session_level_connection_to_node('LOCK TABLE t1_120000');
}
step "s2-select" {
SELECT COUNT(*) FROM t1;
}
step "s2-drop-marked-shards" step "s2-drop-marked-shards"
{ {
SET client_min_messages to DEBUG1; SET client_min_messages to DEBUG1;
CALL isolation_cleanup_orphaned_shards(); CALL isolation_cleanup_orphaned_shards();
} }
step "s2-commit" {
COMMIT;
}
permutation "s1-begin" "s1-move-placement" "s1-drop-marked-shards" "s2-drop-marked-shards" "s1-commit" permutation "s1-begin" "s1-move-placement" "s1-drop-marked-shards" "s2-drop-marked-shards" "s1-commit"
permutation "s1-begin" "s1-move-placement" "s2-drop-marked-shards" "s1-drop-marked-shards" "s1-commit" permutation "s1-begin" "s1-move-placement" "s2-drop-marked-shards" "s1-drop-marked-shards" "s1-commit"
@ -134,4 +116,3 @@ permutation "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "
permutation "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-lock-table-on-worker" "s1-commit" "s1-begin" "s1-move-placement-back" "s1-commit" "s2-stop-connection" permutation "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-lock-table-on-worker" "s1-commit" "s1-begin" "s1-move-placement-back" "s1-commit" "s2-stop-connection"
// make sure we error if we cannot get the lock on pg_dist_placement // make sure we error if we cannot get the lock on pg_dist_placement
permutation "s1-begin" "s1-lock-pg-dist-placement" "s2-drop-old-shards" "s1-commit" permutation "s1-begin" "s1-lock-pg-dist-placement" "s2-drop-old-shards" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-select" "s1-move-placement-without-deferred" "s2-commit" "s1-commit"

@ -17,12 +17,10 @@ test: worker_split_binary_copy_test
test: worker_split_text_copy_test
test: citus_split_shard_by_split_points_negative
test: citus_split_shard_by_split_points
test: citus_split_shard_no_deferred_drop
test: citus_split_shard_by_split_points_failure
# Name citus_split_shard_by_split_points_columnar_partitioned was too long and being truncated.
# use citus_split_shard_columnar_partitioned instead.
test: citus_split_shard_columnar_partitioned
test: citus_non_blocking_split_shards
test: citus_non_blocking_splits_no_deferred_drop
test: citus_non_blocking_split_shard_cleanup
test: citus_non_blocking_split_columnar

@ -32,7 +32,6 @@ INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FR
\c - postgres - :master_port
SET search_path TO "citus_split_test_schema";
SET citus.next_shard_id TO 8981007;
SET citus.defer_drop_after_shard_move TO OFF;
-- BEGIN : Set node id variables
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset

@ -129,11 +129,12 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node
SET ROLE test_shard_split_role;
SET search_path TO "citus_split_test_schema";
SET citus.next_shard_id TO 8981007;
SET citus.defer_drop_after_shard_move TO OFF;
SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
-- END : Move one shard before we split it.
CALL pg_catalog.citus_cleanup_orphaned_shards();
-- BEGIN : Set node id variables
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
@ -167,6 +168,8 @@ CALL pg_catalog.citus_cleanup_orphaned_resources();
SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes');
-- END : Move a shard post split.
CALL pg_catalog.citus_cleanup_orphaned_shards();
-- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard

@ -1,294 +0,0 @@
/*
This suite runs without deferred drop enabled.
Citus Shard Split Test. The test is modeled after 'shard_move_constraints'.
Here is a high-level overview of the test plan:
1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table.
2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors.
3. Create Foreign key constraints between the two co-located distributed tables.
4. Load data into the three tables.
5. Move one of the shards for 'sensors' to test ShardMove -> Split.
6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
7. Move one of the split shards to test Split -> ShardMove.
8. Split an already split shard a second time on a different schema.
9. Create a colocated table with no replica identity.
10. Show we do not allow Split with the shard transfer mode 'auto' if any colocated table has no replica identity.
11. Drop the colocated table with no replica identity.
12. Show we allow Split with the shard transfer mode 'auto' if all colocated tables have a replica identity (a minimal sketch of steps 9-12 follows this comment).
*/
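-- A minimal sketch of steps 9-12 (referenced from step 12 above). It assumes the 'sensors'
-- table, the :worker_1_node / :worker_2_node variables and a source shard id that are set up
-- further down in this file; the colocated table name is a hypothetical example.
CREATE TABLE table_no_rep_id (measureid integer);
SELECT create_distributed_table('table_no_rep_id', 'measureid', colocate_with:='sensors');
-- expected to error: the 'auto' transfer mode picks logical replication, which needs a
-- replica identity (or primary key) on every colocated table
SELECT pg_catalog.citus_split_shard_by_split_points(
    8981000,   -- hypothetical source shardid, look it up in pg_dist_shard
    ARRAY['-1073741824'],
    ARRAY[:worker_1_node, :worker_2_node],
    'auto');
DROP TABLE table_no_rep_id;
-- expected to succeed once every colocated table has a replica identity
SELECT pg_catalog.citus_split_shard_by_split_points(
    8981000,   -- hypothetical source shardid, look it up in pg_dist_shard
    ARRAY['-1073741824'],
    ARRAY[:worker_1_node, :worker_2_node],
    'auto');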
CREATE SCHEMA "citus_split_test_schema_no_deferred_drop";
SET citus.defer_drop_after_shard_split TO OFF;
CREATE ROLE test_shard_split_role_nodeferred_drop WITH LOGIN;
GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema_no_deferred_drop" TO test_shard_split_role_nodeferred_drop;
SET ROLE test_shard_split_role_nodeferred_drop;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SET citus.next_shard_id TO 8981000;
SET citus.next_placement_id TO 8610000;
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc.
CREATE TABLE sensors(
measureid integer,
eventdatetime date,
measure_data jsonb,
meaure_quantity decimal(15, 2),
measure_status char(1),
measure_comment varchar(44),
PRIMARY KEY (measureid, eventdatetime, measure_data));
CREATE INDEX index_on_sensors ON sensors(lower(measureid::text));
ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000;
CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed'));
CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status);
CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors;
SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc.
-- BEGIN: Create co-located distributed and reference tables.
CREATE TABLE reference_table (measureid integer PRIMARY KEY);
SELECT create_reference_table('reference_table');
CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY);
CLUSTER colocated_dist_table USING colocated_dist_table_pkey;
SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors');
CREATE TABLE table_with_index_rep_identity(key int NOT NULL);
CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key);
ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx;
CLUSTER table_with_index_rep_identity USING uqx;
SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors');
-- END: Create co-located distributed and reference tables.
-- BEGIN : Create Foreign key constraints.
ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid);
-- END : Create Foreign key constraints.
-- BEGIN : Load data into tables.
INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
SELECT COUNT(*) FROM sensors;
SELECT COUNT(*) FROM reference_table;
SELECT COUNT(*) FROM colocated_dist_table;
-- END: Load data into tables.
-- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
-- END : Display current state
-- BEGIN : Move one shard before we split it.
\c - postgres - :master_port
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE test_shard_split_role_nodeferred_drop;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SET citus.next_shard_id TO 8981007;
SET citus.defer_drop_after_shard_move TO OFF;
SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
-- END : Move one shard before we split it.
-- BEGIN : Set node id variables
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-- END : Set node id variables
-- BEGIN : Split two shards : One with move and One without move.
-- Perform 2 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981000,
ARRAY['-1073741824'],
ARRAY[:worker_1_node, :worker_2_node],
'force_logical');
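-- Editor's note (an aside, not part of the original test file): with citus.shard_count = 2,
-- the source shard 8981000 is assumed to cover the hash range [-2147483648, -1], so the
-- split point above is roughly its midpoint. A quick arithmetic check of that midpoint:
SELECT ((-2147483648)::bigint - 1) / 2 AS split_point;  -- -1073741824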
-- Perform 3 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981001,
ARRAY['536870911', '1610612735'],
ARRAY[:worker_1_node, :worker_1_node, :worker_2_node],
'force_logical');
-- END : Split two shards : One with move and One without move.
-- BEGIN : Move a shard post split.
SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes');
-- END : Move a shard post split.
-- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
-- END : Display current state
-- BEGIN: Should be able to change/drop constraints
\c - postgres - :master_port
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE test_shard_split_role_nodeferred_drop;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed;
ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200;
DROP STATISTICS stats_on_sensors;
DROP INDEX index_on_sensors_renamed;
ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist;
-- END: Should be able to change/drop constraints
-- BEGIN: Split second time on another schema
SET search_path TO public;
SET citus.next_shard_id TO 8981031;
SELECT pg_catalog.citus_split_shard_by_split_points(
8981007,
ARRAY['-2100000000'],
ARRAY[:worker_1_node, :worker_2_node],
'force_logical');
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
-- END: Split second time on another schema
-- BEGIN: Create a co-located table with no replica identity.
CREATE TABLE table_no_rep_id (measureid integer);
SELECT create_distributed_table('table_no_rep_id', 'measureid', colocate_with:='sensors');
-- END: Create a co-located table with no replica identity.
-- BEGIN: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity
SET citus.next_shard_id TO 8981041;
SELECT pg_catalog.citus_split_shard_by_split_points(
8981031,
ARRAY['-2120000000'],
ARRAY[:worker_1_node, :worker_2_node]);
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
-- END: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity
-- BEGIN: Drop the co-located table with no replica identity.
DROP TABLE table_no_rep_id;
-- END: Drop the co-located table with no replica identity.
-- BEGIN: Split a shard with shard_transfer_mode='auto' with all colocated tables having replica identity
SET citus.next_shard_id TO 8981041;
SELECT pg_catalog.citus_split_shard_by_split_points(
8981031,
ARRAY['-2120000000'],
ARRAY[:worker_1_node, :worker_2_node],
'auto');
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
-- END: Split a shard with shard_transfer_mode='auto' with all colocated tables having replica identity
-- BEGIN: Validate Data Count
SELECT COUNT(*) FROM sensors;
SELECT COUNT(*) FROM reference_table;
SELECT COUNT(*) FROM colocated_dist_table;
-- END: Validate Data Count
--BEGIN : Cleanup
\c - postgres - :master_port
DROP SCHEMA "citus_split_test_schema_no_deferred_drop" CASCADE;
SET citus.defer_drop_after_shard_split TO ON;
--END : Cleanup
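-- Editor's note: with the defer_drop_* GUCs removed there is no immediate-drop variant left
-- to exercise; leftover source shards are always dropped through deferred cleanup. A minimal
-- sketch of that flow follows (assuming the pg_dist_cleanup catalog that backs the cleanup
-- records; this is illustrative and not part of the original suite):
SELECT pg_catalog.citus_split_shard_by_split_points(
    8981000,
    ARRAY['-1073741824'],
    ARRAY[:worker_1_node, :worker_2_node],
    'force_logical');
SELECT count(*) > 0 AS has_cleanup_records FROM pg_dist_cleanup;
CALL pg_catalog.citus_cleanup_orphaned_resources();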

@ -125,10 +125,10 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node
SET ROLE test_split_role; SET ROLE test_split_role;
SET search_path TO "citus_split_test_schema"; SET search_path TO "citus_split_test_schema";
SET citus.next_shard_id TO 8981007; SET citus.next_shard_id TO 8981007;
SET citus.defer_drop_after_shard_move TO OFF;
SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
-- END : Move one shard before we split it. -- END : Move one shard before we split it.
CALL citus_cleanup_orphaned_shards();
-- BEGIN : Set node id variables -- BEGIN : Set node id variables
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
@ -163,6 +163,8 @@ CALL pg_catalog.citus_cleanup_orphaned_resources();
SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes');
-- END : Move a shard post split. -- END : Move a shard post split.
CALL citus_cleanup_orphaned_shards();
-- BEGIN : Display current state. -- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard FROM pg_dist_shard AS shard

@ -1,246 +0,0 @@
/*
This suite runs without deferred drop enabled.
Citus Shard Split Test. The test is modeled similarly to 'shard_move_constraints'.
Here is a high-level overview of the test plan:
1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table.
2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors.
3. Create Foreign key constraints between the two co-located distributed tables.
4. Load data into the three tables.
5. Move one of the shards for 'sensors' to test ShardMove -> Split.
6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
7. Move one of the split shards to test Split -> ShardMove.
8. Split an already split shard second time on a different schema.
*/
CREATE SCHEMA "citus_split_test_schema_no_deferred_drop";
SET citus.defer_drop_after_shard_split TO OFF;
CREATE ROLE test_split_deferred_role WITH LOGIN;
GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema_no_deferred_drop" TO test_split_deferred_role;
SET ROLE test_split_deferred_role;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SET citus.next_shard_id TO 8981000;
SET citus.next_placement_id TO 8610000;
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc.
CREATE TABLE sensors(
measureid integer,
eventdatetime date,
measure_data jsonb,
meaure_quantity decimal(15, 2),
measure_status char(1),
measure_comment varchar(44),
PRIMARY KEY (measureid, eventdatetime, measure_data));
CREATE INDEX index_on_sensors ON sensors(lower(measureid::text));
ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000;
CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed'));
CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status);
CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors;
SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc.
-- BEGIN: Create co-located distributed and reference tables.
CREATE TABLE reference_table (measureid integer PRIMARY KEY);
SELECT create_reference_table('reference_table');
CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY);
CLUSTER colocated_dist_table USING colocated_dist_table_pkey;
SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors');
CREATE TABLE table_with_index_rep_identity(key int NOT NULL);
CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key);
ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx;
CLUSTER table_with_index_rep_identity USING uqx;
SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors');
-- END: Create co-located distributed and reference tables.
-- BEGIN : Create Foreign key constraints.
ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid);
-- END : Create Foreign key constraints.
-- BEGIN : Load data into tables.
INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
SELECT COUNT(*) FROM sensors;
SELECT COUNT(*) FROM reference_table;
SELECT COUNT(*) FROM colocated_dist_table;
-- END: Load data into tables.
-- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
-- END : Display current state
-- BEGIN : Move one shard before we split it.
\c - postgres - :master_port
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE test_split_deferred_role;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SET citus.next_shard_id TO 8981007;
SET citus.defer_drop_after_shard_move TO OFF;
SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
-- END : Move one shard before we split it.
-- BEGIN : Set node id variables
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-- END : Set node id variables
-- BEGIN : Split two shards : One with move and One without move.
-- Perform 2 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981000,
ARRAY['-1073741824'],
ARRAY[:worker_1_node, :worker_2_node],
'block_writes');
-- Perform 3 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981001,
ARRAY['536870911', '1610612735'],
ARRAY[:worker_1_node, :worker_1_node, :worker_2_node],
'block_writes');
-- END : Split two shards : One with move and One without move.
-- BEGIN : Move a shard post split.
SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes');
-- END : Move a shard post split.
-- BEGIN : Display current state.
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema_no_deferred_drop')
)
ORDER BY stxname ASC;
-- END : Display current state
-- BEGIN: Should be able to change/drop constraints
\c - postgres - :master_port
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE test_split_deferred_role;
SET search_path TO "citus_split_test_schema_no_deferred_drop";
ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed;
ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200;
DROP STATISTICS stats_on_sensors;
DROP INDEX index_on_sensors_renamed;
ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist;
-- END: Should be able to change/drop constraints
-- BEGIN: Split second time on another schema
SET search_path TO public;
SET citus.next_shard_id TO 8981031;
SELECT pg_catalog.citus_split_shard_by_split_points(
8981007,
ARRAY['-2100000000'],
ARRAY[:worker_1_node, :worker_2_node],
'block_writes');
SET search_path TO "citus_split_test_schema_no_deferred_drop";
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
-- END: Split second time on another schema
-- BEGIN: Validate Data Count
SELECT COUNT(*) FROM sensors;
SELECT COUNT(*) FROM reference_table;
SELECT COUNT(*) FROM colocated_dist_table;
-- END: Validate Data Count
--BEGIN : Cleanup
\c - postgres - :master_port
DROP SCHEMA "citus_split_test_schema_no_deferred_drop" CASCADE;
SET citus.defer_drop_after_shard_split TO ON;
--END : Cleanup

@ -86,29 +86,6 @@ SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
-- failure on dropping old colocated shard
-- Disable deferred drop, otherwise we will skip the drop and the operation will succeed instead of failing.
SET citus.defer_drop_after_shard_split TO OFF;
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
-- cancellation on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
-- failure on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
-- cancellation on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');
-- Re-enable deferred drop for the rest of the tests.
SET citus.defer_drop_after_shard_split TO ON;
-- failure on foreign key creation -- failure on foreign key creation
SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()'); SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes');

@ -149,29 +149,6 @@ SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode
SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").cancel(' || :pid || ')'); SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on dropping old shard
-- failure on dropping old colocated shard
-- Disable deferred drop, otherwise we will skip the drop and the operation will succeed instead of failing.
SET citus.defer_drop_after_shard_split TO OFF;
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on dropping old shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- failure on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- cancellation on dropping old colocated shard
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');
-- Re-enable deferred drop for the rest of the tests.
SET citus.defer_drop_after_shard_split TO ON;
-- failure on foreign key creation -- failure on foreign key creation
SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()'); SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()');
SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical');

@ -819,7 +819,6 @@ CREATE EXTENSION citus;
CREATE TABLE test (x int, y int); CREATE TABLE test (x int, y int);
INSERT INTO test VALUES (1,2); INSERT INTO test VALUES (1,2);
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
SET citus.defer_drop_after_shard_split TO off;
SELECT create_distributed_table_concurrently('test','x'); SELECT create_distributed_table_concurrently('test','x');
DROP TABLE test; DROP TABLE test;
TRUNCATE pg_dist_node; TRUNCATE pg_dist_node;

@ -399,29 +399,6 @@ RESET citus.enable_metadata_sync;
CREATE EVENT TRIGGER abort_drop ON sql_drop CREATE EVENT TRIGGER abort_drop ON sql_drop
EXECUTE PROCEDURE abort_drop_command(); EXECUTE PROCEDURE abort_drop_command();
\c - postgres - :master_port
-- Disable deferred drop, otherwise we will skip the drop and the operation will succeed instead of failing.
SET citus.defer_drop_after_shard_split TO OFF;
SET ROLE mx_isolation_role_ent;
SET search_path to "Tenant Isolation";
\set VERBOSITY terse
SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'block_writes');
\set VERBOSITY default
-- check if metadata is changed
SELECT * FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass
ORDER BY shardminvalue::BIGINT, logicalrelid;
\c - - - :worker_1_port
SET search_path to "Tenant Isolation";
-- however, new tables are already created
SET citus.override_table_visibility TO false;
\d
\c - postgres - :worker_1_port \c - postgres - :worker_1_port
DROP EVENT TRIGGER abort_drop; DROP EVENT TRIGGER abort_drop;

@ -200,7 +200,6 @@ LIMIT 1;
SELECT COUNT(*) FROM pg_dist_transaction; SELECT COUNT(*) FROM pg_dist_transaction;
BEGIN; BEGIN;
SET LOCAL citus.defer_drop_after_shard_move TO OFF;
SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode := 'block_writes'); SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode := 'block_writes');
COMMIT; COMMIT;
SELECT COUNT(*) FROM pg_dist_transaction; SELECT COUNT(*) FROM pg_dist_transaction;

@ -6,7 +6,6 @@ SET citus.next_shard_id TO 20000000;
SET citus.shard_count TO 6; SET citus.shard_count TO 6;
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
SET citus.defer_drop_after_shard_move TO on;
CREATE SCHEMA shard_move_deferred_delete; CREATE SCHEMA shard_move_deferred_delete;
SET search_path TO shard_move_deferred_delete; SET search_path TO shard_move_deferred_delete;
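-- Editor's note: the removed SET citus.defer_drop_after_shard_move above is no longer needed;
-- shard moves always defer dropping the source placement now. A minimal sketch of the behaviour
-- this suite relies on (the shard id is illustrative, not taken from the original file):
SELECT citus_move_shard_placement(20000000, 'localhost', :worker_1_port,
                                  'localhost', :worker_2_port,
                                  shard_transfer_mode := 'block_writes');
-- the source placement lingers as an orphaned shard until cleanup runs:
CALL pg_catalog.citus_cleanup_orphaned_resources();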