From c379ff861415e83ab0600f4ee15c650601105bab Mon Sep 17 00:00:00 2001
From: Ahmet Gedemenli
Date: Tue, 25 Oct 2022 16:48:34 +0300
Subject: [PATCH] Drop defer drop gucs (#6447)

DESCRIPTION: Drops GUC defer_drop_after_shard_split
DESCRIPTION: Drops GUC defer_drop_after_shard_move

Drop the GUCs and the related parts of the code. Delete tests that were
added specifically for the GUCs. Keep tests that can be used without the
GUCs. Update the affected test outputs.

The motivation for this PR is to have an "always deferring" mechanism.
These two GUCs provided an option to skip deferring the drop of objects
during a shard move/split and instead drop them immediately. With this PR,
we always defer dropping orphaned shards and other types of objects.

A separate PR will extend the deferred cleanup operation so that we also
create deferred-drop records for Subscriptions, Publications, Replication
Slots, etc. This will let us keep track of the objects created during a
shard move/split that need to be dropped. Objects created specifically for
the current operation will then be dropped at the end of it.

We have an issue (a draft roadmap) for enabling parallel shard moves. For
details, please see: https://github.com/citusdata/citus/issues/6437
---
 .../distributed/operations/shard_split.c      |  96 +--
 .../distributed/operations/shard_transfer.c   |  50 +-
 src/backend/distributed/shared_library_init.c |  32 +-
 src/include/distributed/shard_cleaner.h       |   2 -
 ...citus_non_blocking_shard_split_cleanup.out |   1 -
 ...citus_non_blocking_split_shard_cleanup.out |   1 -
 .../citus_non_blocking_split_shards.out       |   5 +-
 ...s_non_blocking_splits_no_deferred_drop.out | 565 ------------------
 .../citus_split_shard_by_split_points.out     |   5 +-
 .../citus_split_shard_no_deferred_drop.out    | 464 --------------
 .../expected/failure_tenant_isolation.out     |  42 --
 .../failure_tenant_isolation_nonblocking.out  |  43 --
 ...gical_replication_multi_shard_commands.out | 140 ++---
 ...ical_replication_single_shard_commands.out |  98 +--
 .../isolation_rebalancer_deferred_drop.out    |  36 +-
 src/test/regress/expected/multi_extension.out |   1 -
 .../expected/multi_tenant_isolation.out       | 154 ++---
 .../expected/multi_transaction_recovery.out   |   1 -
 .../expected/shard_move_deferred_delete.out   |   1 -
 .../isolation_rebalancer_deferred_drop.spec   |  19 -
 src/test/regress/split_schedule               |   2 -
 ...citus_non_blocking_split_shard_cleanup.sql |   1 -
 .../sql/citus_non_blocking_split_shards.sql   |   5 +-
 ...s_non_blocking_splits_no_deferred_drop.sql | 294 ---------
 .../sql/citus_split_shard_by_split_points.sql |   4 +-
 .../citus_split_shard_no_deferred_drop.sql    | 246 --------
 .../regress/sql/failure_tenant_isolation.sql  |  23 -
 .../failure_tenant_isolation_nonblocking.sql  |  23 -
 src/test/regress/sql/multi_extension.sql      |   1 -
 .../regress/sql/multi_tenant_isolation.sql    |  23 -
 .../sql/multi_transaction_recovery.sql        |   1 -
 .../sql/shard_move_deferred_delete.sql        |   1 -
 32 files changed, 199 insertions(+), 2181 deletions(-)
 delete mode 100644 src/test/regress/expected/citus_non_blocking_splits_no_deferred_drop.out
 delete mode 100644 src/test/regress/expected/citus_split_shard_no_deferred_drop.out
 delete mode 100644 src/test/regress/sql/citus_non_blocking_splits_no_deferred_drop.sql
 delete mode 100644 src/test/regress/sql/citus_split_shard_no_deferred_drop.sql

diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c
index 14e250af9..e62412c7c 100644
--- a/src/backend/distributed/operations/shard_split.c
+++ 
b/src/backend/distributed/operations/shard_split.c @@ -46,9 +46,6 @@ #include "distributed/shard_rebalancer.h" #include "postmaster/postmaster.h" -/* declarations for dynamic loading */ -bool DeferShardDeleteOnSplit = true; - /* * Entry for map that tracks ShardInterval -> Placement Node * created by split workflow. @@ -159,7 +156,6 @@ static uint64 GetNextShardIdForSplitChild(void); static void AcquireNonblockingSplitLock(Oid relationId); static List * GetWorkerNodesFromWorkerIds(List *nodeIdsForPlacementList); static void DropShardListMetadata(List *shardIntervalList); -static void DropShardList(List *shardIntervalList); static void InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList); /* Customize error message strings based on operation type */ @@ -633,26 +629,15 @@ BlockingShardSplit(SplitOperation splitOperation, /* - * Delete old shards metadata and either mark the shards as - * to be deferred drop or physically delete them. + * Delete old shards metadata and mark the shards as to be deferred drop. * Have to do that before creating the new shard metadata, * because there's cross-checks preventing inconsistent metadata * (like overlapping shards). */ - if (DeferShardDeleteOnSplit) - { - ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s", - operationName))); + ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s", + operationName))); - InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList); - } - else - { - ereport(LOG, (errmsg("performing cleanup of source shard(s) for %s", - operationName))); - - DropShardList(sourceColocatedShardIntervalList); - } + InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList); DropShardListMetadata(sourceColocatedShardIntervalList); @@ -670,7 +655,7 @@ BlockingShardSplit(SplitOperation splitOperation, /* * Create foreign keys if exists after the metadata changes happening in - * DropShardList() and InsertSplitChildrenShardMetadata() because the foreign + * InsertSplitChildrenShardMetadata() because the foreign * key creation depends on the new metadata. */ CreateForeignKeyConstraints(shardGroupSplitIntervalListList, @@ -1393,54 +1378,8 @@ DropShardListMetadata(List *shardIntervalList) /* - * DropShardList drops actual shards from the worker nodes. 
- */ -static void -DropShardList(List *shardIntervalList) -{ - ListCell *shardIntervalCell = NULL; - - foreach(shardIntervalCell, shardIntervalList) - { - ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); - ListCell *shardPlacementCell = NULL; - uint64 oldShardId = shardInterval->shardId; - - /* delete shard placements */ - List *shardPlacementList = ActiveShardPlacementList(oldShardId); - foreach(shardPlacementCell, shardPlacementList) - { - ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell); - char *workerName = placement->nodeName; - uint32 workerPort = placement->nodePort; - StringInfo dropQuery = makeStringInfo(); - - /* get shard name */ - char *qualifiedShardName = ConstructQualifiedShardName(shardInterval); - - char storageType = shardInterval->storageType; - if (storageType == SHARD_STORAGE_TABLE) - { - appendStringInfo(dropQuery, DROP_REGULAR_TABLE_COMMAND, - qualifiedShardName); - } - else if (storageType == SHARD_STORAGE_FOREIGN) - { - appendStringInfo(dropQuery, DROP_FOREIGN_TABLE_COMMAND, - qualifiedShardName); - } - - /* drop old shard */ - SendCommandToWorker(workerName, workerPort, dropQuery->data); - } - } -} - - -/* - * If deferred drop is enabled, insert deferred cleanup records instead of - * dropping actual shards from the worker nodes. The shards will be dropped - * by background cleaner later. + * Insert deferred cleanup records. + * The shards will be dropped by background cleaner later. */ static void InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList) @@ -1698,26 +1637,15 @@ NonBlockingShardSplit(SplitOperation splitOperation, SHARD_SPLIT); /* - * 10) Delete old shards metadata and either mark the shards as - * to be deferred drop or physically delete them. + * 10) Delete old shards metadata and mark the shards as to be deferred drop. * Have to do that before creating the new shard metadata, * because there's cross-checks preventing inconsistent metadata * (like overlapping shards). */ - if (DeferShardDeleteOnSplit) - { - ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s", - operationName))); + ereport(LOG, (errmsg("marking deferred cleanup of source shard(s) for %s", + operationName))); - InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList); - } - else - { - ereport(LOG, (errmsg("performing cleanup of source shard(s) for %s", - operationName))); - - DropShardList(sourceColocatedShardIntervalList); - } + InsertDeferredDropCleanupRecordsForShards(sourceColocatedShardIntervalList); DropShardListMetadata(sourceColocatedShardIntervalList); @@ -1769,7 +1697,7 @@ NonBlockingShardSplit(SplitOperation splitOperation, /* * 14) Create foreign keys if exists after the metadata changes happening in - * DropShardList() and InsertSplitChildrenShardMetadata() because the foreign + * InsertSplitChildrenShardMetadata() because the foreign * key creation depends on the new metadata. 
*/ CreateUncheckedForeignKeyConstraints(logicalRepTargetList); diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c index 5d50417cc..de8217cfd 100644 --- a/src/backend/distributed/operations/shard_transfer.c +++ b/src/backend/distributed/operations/shard_transfer.c @@ -101,10 +101,9 @@ static List * RecreateTableDDLCommandList(Oid relationId); static void EnsureTableListOwner(List *tableIdList); static void EnsureTableListSuitableForReplication(List *tableIdList); -static void DropColocatedShardPlacement(ShardInterval *shardInterval, char *nodeName, - int32 nodePort); static void MarkForDropColocatedShardPlacement(ShardInterval *shardInterval, - char *nodeName, int32 nodePort); + char *nodeName, + int32 nodePort); static void UpdateColocatedShardPlacementMetadataOnWorkers(int64 shardId, char *sourceNodeName, int32 sourceNodePort, @@ -138,8 +137,6 @@ PG_FUNCTION_INFO_V1(master_copy_shard_placement); PG_FUNCTION_INFO_V1(citus_move_shard_placement); PG_FUNCTION_INFO_V1(master_move_shard_placement); -bool DeferShardDeleteOnMove = true; - double DesiredPercentFreeAfterMove = 10; bool CheckAvailableSpaceBeforeMove = true; @@ -402,14 +399,7 @@ citus_move_shard_placement(PG_FUNCTION_ARGS) } /* since this is move operation, we remove shards from source node after copy */ - if (DeferShardDeleteOnMove) - { - MarkForDropColocatedShardPlacement(shardInterval, sourceNodeName, sourceNodePort); - } - else - { - DropColocatedShardPlacement(shardInterval, sourceNodeName, sourceNodePort); - } + MarkForDropColocatedShardPlacement(shardInterval, sourceNodeName, sourceNodePort); UpdateColocatedShardPlacementMetadataOnWorkers(shardId, sourceNodeName, sourceNodePort, targetNodeName, @@ -1872,44 +1862,14 @@ RecreateTableDDLCommandList(Oid relationId) } -/* - * DropColocatedShardPlacement deletes the shard placement metadata for the given shard - * placement from the pg_dist_placement, and then it drops the shard table - * from the given node. The function does this for all colocated placements. - */ -static void -DropColocatedShardPlacement(ShardInterval *shardInterval, char *nodeName, int32 nodePort) -{ - List *colocatedShardList = ColocatedShardIntervalList(shardInterval); - ListCell *colocatedShardCell = NULL; - - foreach(colocatedShardCell, colocatedShardList) - { - ShardInterval *colocatedShard = (ShardInterval *) lfirst(colocatedShardCell); - char *qualifiedTableName = ConstructQualifiedShardName(colocatedShard); - StringInfo dropQuery = makeStringInfo(); - uint64 shardId = colocatedShard->shardId; - List *shardPlacementList = - ShardPlacementListIncludingOrphanedPlacements(shardId); - ShardPlacement *placement = - SearchShardPlacementInListOrError(shardPlacementList, nodeName, nodePort); - - appendStringInfo(dropQuery, DROP_REGULAR_TABLE_COMMAND, qualifiedTableName); - - DeleteShardPlacementRow(placement->placementId); - SendCommandToWorker(nodeName, nodePort, dropQuery->data); - } -} - - /* * MarkForDropColocatedShardPlacement marks the shard placement metadata for * the given shard placement to be deleted in pg_dist_placement. The function * does this for all colocated placements. 
*/ static void -MarkForDropColocatedShardPlacement(ShardInterval *shardInterval, char *nodeName, int32 - nodePort) +MarkForDropColocatedShardPlacement(ShardInterval *shardInterval, char *nodeName, + int32 nodePort) { List *colocatedShardList = ColocatedShardIntervalList(shardInterval); ListCell *colocatedShardCell = NULL; diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 42822470e..d2ccfb136 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -148,6 +148,8 @@ DEFINE_COLUMNAR_PASSTHROUGH_FUNC(test_columnar_storage_write_new_page) static char *CitusVersion = CITUS_VERSION; static char *DeprecatedEmptyString = ""; static char *MitmfifoEmptyString = ""; +static bool DeprecatedDeferShardDeleteOnMove = true; +static bool DeprecatedDeferShardDeleteOnSplit = true; /* deprecated GUC value that should not be used anywhere outside this file */ static int ReplicationModel = REPLICATION_MODEL_STREAMING; @@ -1010,18 +1012,9 @@ RegisterCitusConfigVariables(void) DefineCustomBoolVariable( "citus.defer_drop_after_shard_move", - gettext_noop("When enabled a shard move will mark the original shards " - "for deletion after a successful move, instead of deleting " - "them right away."), - gettext_noop("The deletion of a shard can sometimes run into a conflict with a " - "long running transactions on a the shard during the drop phase of " - "the shard move. This causes some moves to be rolled back after " - "resources have been spend on moving the shard. To prevent " - "conflicts this feature lets you skip the actual deletion till a " - "later point in time. When used one should set " - "citus.defer_shard_delete_interval to make sure defered deletions " - "will be executed"), - &DeferShardDeleteOnMove, + gettext_noop("Deprecated, Citus always defers drop after shard move"), + NULL, + &DeprecatedDeferShardDeleteOnMove, true, PGC_USERSET, 0, @@ -1029,18 +1022,9 @@ RegisterCitusConfigVariables(void) DefineCustomBoolVariable( "citus.defer_drop_after_shard_split", - gettext_noop("When enabled a shard split will mark the original shards " - "for deletion after a successful split, instead of deleting " - "them right away."), - gettext_noop("The deletion of a shard can sometimes run into a conflict with a " - "long running transactions on a the shard during the drop phase of " - "the shard split. This causes some splits to be rolled back after " - "resources have been spend on moving the shard. To prevent " - "conflicts this feature lets you skip the actual deletion till a " - "later point in time. 
When used one should set " - "citus.defer_shard_delete_interval to make sure defered deletions " - "will be executed"), - &DeferShardDeleteOnSplit, + gettext_noop("Deprecated, Citus always defers drop after shard split"), + NULL, + &DeprecatedDeferShardDeleteOnSplit, true, PGC_USERSET, 0, diff --git a/src/include/distributed/shard_cleaner.h b/src/include/distributed/shard_cleaner.h index 55633f653..6a1f76664 100644 --- a/src/include/distributed/shard_cleaner.h +++ b/src/include/distributed/shard_cleaner.h @@ -14,11 +14,9 @@ /* GUC to configure deferred shard deletion */ extern int DeferShardDeleteInterval; extern int BackgroundTaskQueueCheckInterval; -extern bool DeferShardDeleteOnMove; extern double DesiredPercentFreeAfterMove; extern bool CheckAvailableSpaceBeforeMove; -extern bool DeferShardDeleteOnSplit; extern int NextOperationId; extern int NextCleanupRecordId; diff --git a/src/test/regress/expected/citus_non_blocking_shard_split_cleanup.out b/src/test/regress/expected/citus_non_blocking_shard_split_cleanup.out index 2cfa1a716..5fe6dc0d0 100644 --- a/src/test/regress/expected/citus_non_blocking_shard_split_cleanup.out +++ b/src/test/regress/expected/citus_non_blocking_shard_split_cleanup.out @@ -41,7 +41,6 @@ INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FR SET ROLE test_split_role; SET search_path TO "citus_split_test_schema"; SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); citus_move_shard_placement --------------------------------------------------------------------- diff --git a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out index afa3c89e4..bf6467779 100644 --- a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out +++ b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out @@ -31,7 +31,6 @@ INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FR \c - postgres - :master_port SET search_path TO "citus_split_test_schema"; SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; -- BEGIN : Set node id variables SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset diff --git a/src/test/regress/expected/citus_non_blocking_split_shards.out b/src/test/regress/expected/citus_non_blocking_split_shards.out index 59ea77f61..ad0afd3e8 100644 --- a/src/test/regress/expected/citus_non_blocking_split_shards.out +++ b/src/test/regress/expected/citus_non_blocking_split_shards.out @@ -211,7 +211,6 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node SET ROLE test_shard_split_role; SET search_path TO "citus_split_test_schema"; SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); citus_move_shard_placement --------------------------------------------------------------------- @@ -219,6 +218,8 @@ SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localho (1 row) -- END : Move one shard before we split it. 
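(As an illustration of the resulting behavior, outside the patch itself: with citus.defer_drop_after_shard_move gone, the updated tests clean up the orphaned source placements explicitly rather than relying on an immediate drop. A minimal SQL sketch of that pattern, assuming a regression cluster with workers on ports 57637 and 57638:

    -- move a shard; the source placement is left behind as an orphan
    SELECT citus_move_shard_placement(8981000, 'localhost', 57637, 'localhost', 57638,
                                      shard_transfer_mode := 'force_logical');
    -- drop the orphaned placements now instead of waiting for the maintenance
    -- daemon (controlled by citus.defer_shard_delete_interval)
    CALL pg_catalog.citus_cleanup_orphaned_shards();

This is the pattern the added lines below exercise.)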
+CALL pg_catalog.citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 3 orphaned shards -- BEGIN : Set node id variables SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset @@ -263,6 +264,8 @@ SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localho (1 row) -- END : Move a shard post split. +CALL pg_catalog.citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 3 orphaned shards -- BEGIN : Display current state. SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport FROM pg_dist_shard AS shard diff --git a/src/test/regress/expected/citus_non_blocking_splits_no_deferred_drop.out b/src/test/regress/expected/citus_non_blocking_splits_no_deferred_drop.out deleted file mode 100644 index 4a66b5c48..000000000 --- a/src/test/regress/expected/citus_non_blocking_splits_no_deferred_drop.out +++ /dev/null @@ -1,565 +0,0 @@ -/* -This suite runs without deferred drop enabled. - -Citus Shard Split Test.The test is model similar to 'shard_move_constraints'. -Here is a high level overview of test plan: - 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table. - 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors. - 3. Create Foreign key constraints between the two co-located distributed tables. - 4. Load data into the three tables. - 5. Move one of the shards for 'sensors' to test ShardMove -> Split. - 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables. - 7. Move one of the split shard to test Split -> ShardMove. - 8. Split an already split shard second time on a different schema. - 9. Create a colocated table with no replica identity. - 10. Show we do not allow Split with the shard transfer mode 'auto' if any colocated table has no replica identity. - 11. Drop the colocated table with no replica identity. - 12. Show we allow Split with the shard transfer mode 'auto' if all colocated tables has replica identity. -*/ -CREATE SCHEMA "citus_split_test_schema_no_deferred_drop"; -SET citus.defer_drop_after_shard_split TO OFF; -CREATE ROLE test_shard_split_role_nodeferred_drop WITH LOGIN; -GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema_no_deferred_drop" TO test_shard_split_role_nodeferred_drop; -SET ROLE test_shard_split_role_nodeferred_drop; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SET citus.next_shard_id TO 8981000; -SET citus.next_placement_id TO 8610000; -SET citus.shard_count TO 2; -SET citus.shard_replication_factor TO 1; --- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc. 
-CREATE TABLE sensors( - measureid integer, - eventdatetime date, - measure_data jsonb, - meaure_quantity decimal(15, 2), - measure_status char(1), - measure_comment varchar(44), - PRIMARY KEY (measureid, eventdatetime, measure_data)); -CREATE INDEX index_on_sensors ON sensors(lower(measureid::text)); -ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000; -CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed')); -CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status); -CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors; -SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. --- BEGIN: Create co-located distributed and reference tables. -CREATE TABLE reference_table (measureid integer PRIMARY KEY); -SELECT create_reference_table('reference_table'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); -CLUSTER colocated_dist_table USING colocated_dist_table_pkey; -SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE table_with_index_rep_identity(key int NOT NULL); -CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key); -ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx; -CLUSTER table_with_index_rep_identity USING uqx; -SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- END: Create co-located distributed and reference tables. --- BEGIN : Create Foreign key constraints. -ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); --- END : Create Foreign key constraints. --- BEGIN : Load data into tables. -INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; -INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; -INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i; -SELECT COUNT(*) FROM sensors; - count ---------------------------------------------------------------------- - 1001 -(1 row) - -SELECT COUNT(*) FROM reference_table; - count ---------------------------------------------------------------------- - 1001 -(1 row) - -SELECT COUNT(*) FROM colocated_dist_table; - count ---------------------------------------------------------------------- - 1001 -(1 row) - --- END: Load data into tables. --- BEGIN : Display current state. 
-SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport ---------------------------------------------------------------------- - 8981000 | sensors | -2147483648 | -1 | localhost | 57637 - 8981001 | sensors | 0 | 2147483647 | localhost | 57638 - 8981003 | colocated_dist_table | -2147483648 | -1 | localhost | 57637 - 8981004 | colocated_dist_table | 0 | 2147483647 | localhost | 57638 - 8981005 | table_with_index_rep_identity | -2147483648 | -1 | localhost | 57637 - 8981006 | table_with_index_rep_identity | 0 | 2147483647 | localhost | 57638 -(6 rows) - -\c - - - :worker_1_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - relname | Constraint | Definition ---------------------------------------------------------------------- - sensors_8981000 | fkey_table_to_dist_8981000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981003(measureid) -(1 row) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - sensors_8981000 | CREATE INDEX hash_index_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981000 | CREATE INDEX index_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (lower((measureid)::text)) - sensors_8981000 | CREATE INDEX index_with_include_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981000 | CREATE UNIQUE INDEX sensors_pkey_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (measureid, eventdatetime, measure_data) -(4 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - table_with_index_rep_identity_8981005 | CREATE UNIQUE INDEX uqx_8981005 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981005 USING btree (key) -(1 row) - - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - stxname ---------------------------------------------------------------------- - stats_on_sensors - stats_on_sensors_8981000 -(2 rows) - -\c - - - :worker_2_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET 
citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - relname | Constraint | Definition ---------------------------------------------------------------------- - sensors_8981001 | fkey_table_to_dist_8981001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981004(measureid) -(1 row) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - sensors_8981001 | CREATE INDEX hash_index_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981001 | CREATE INDEX index_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (lower((measureid)::text)) - sensors_8981001 | CREATE INDEX index_with_include_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981001 | CREATE UNIQUE INDEX sensors_pkey_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (measureid, eventdatetime, measure_data) -(4 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - table_with_index_rep_identity_8981006 | CREATE UNIQUE INDEX uqx_8981006 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981006 USING btree (key) -(1 row) - - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - stxname ---------------------------------------------------------------------- - stats_on_sensors - stats_on_sensors_8981001 -(2 rows) - --- END : Display current state --- BEGIN : Move one shard before we split it. -\c - postgres - :master_port -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE test_shard_split_role_nodeferred_drop; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; -SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); - citus_move_shard_placement ---------------------------------------------------------------------- - -(1 row) - --- END : Move one shard before we split it. --- BEGIN : Set node id variables -SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset -SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset --- END : Set node id variables --- BEGIN : Split two shards : One with move and One without move. 
--- Perform 2 way split -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981000, - ARRAY['-1073741824'], - ARRAY[:worker_1_node, :worker_2_node], - 'force_logical'); - citus_split_shard_by_split_points ---------------------------------------------------------------------- - -(1 row) - --- Perform 3 way split -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981001, - ARRAY['536870911', '1610612735'], - ARRAY[:worker_1_node, :worker_1_node, :worker_2_node], - 'force_logical'); - citus_split_shard_by_split_points ---------------------------------------------------------------------- - -(1 row) - --- END : Split two shards : One with move and One without move. --- BEGIN : Move a shard post split. -SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); - citus_move_shard_placement ---------------------------------------------------------------------- - -(1 row) - --- END : Move a shard post split. --- BEGIN : Display current state. -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport ---------------------------------------------------------------------- - 8981007 | sensors | -2147483648 | -1073741824 | localhost | 57638 - 8981008 | sensors | -1073741823 | -1 | localhost | 57638 - 8981013 | sensors | 0 | 536870911 | localhost | 57637 - 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 - 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 - 8981009 | colocated_dist_table | -2147483648 | -1073741824 | localhost | 57638 - 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 - 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 - 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 - 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 - 8981011 | table_with_index_rep_identity | -2147483648 | -1073741824 | localhost | 57638 - 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 - 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 - 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 - 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 -(15 rows) - -\c - - - :worker_1_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - relname | Constraint | Definition ---------------------------------------------------------------------- - sensors_8981013 | fkey_table_to_dist_8981013 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981016(measureid) - sensors_8981014 | fkey_table_to_dist_8981014 | FOREIGN KEY (measureid) REFERENCES 
colocated_dist_table_8981017(measureid) -(2 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - sensors_8981013 | CREATE INDEX hash_index_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981013 | CREATE INDEX index_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (lower((measureid)::text)) - sensors_8981013 | CREATE INDEX index_with_include_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981013 | CREATE UNIQUE INDEX sensors_pkey_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (measureid, eventdatetime, measure_data) - sensors_8981014 | CREATE INDEX hash_index_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981014 | CREATE INDEX index_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (lower((measureid)::text)) - sensors_8981014 | CREATE INDEX index_with_include_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981014 | CREATE UNIQUE INDEX sensors_pkey_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (measureid, eventdatetime, measure_data) -(8 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - table_with_index_rep_identity_8981019 | CREATE UNIQUE INDEX uqx_8981019 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981019 USING btree (key) - table_with_index_rep_identity_8981020 | CREATE UNIQUE INDEX uqx_8981020 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981020 USING btree (key) -(2 rows) - - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - stxname ---------------------------------------------------------------------- - stats_on_sensors - stats_on_sensors_8981013 - stats_on_sensors_8981014 -(3 rows) - -\c - - - :worker_2_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - relname | Constraint | Definition ---------------------------------------------------------------------- - sensors_8981007 | fkey_table_to_dist_8981007 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981009(measureid) - sensors_8981008 | fkey_table_to_dist_8981008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981010(measureid) - sensors_8981015 | fkey_table_to_dist_8981015 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981018(measureid) -(3 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE 
tablename like 'sensors_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - sensors_8981007 | CREATE INDEX hash_index_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981007 | CREATE INDEX index_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (lower((measureid)::text)) - sensors_8981007 | CREATE INDEX index_with_include_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981007 | CREATE UNIQUE INDEX sensors_pkey_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (measureid, eventdatetime, measure_data) - sensors_8981008 | CREATE INDEX hash_index_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981008 | CREATE INDEX index_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (lower((measureid)::text)) - sensors_8981008 | CREATE INDEX index_with_include_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981008 | CREATE UNIQUE INDEX sensors_pkey_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (measureid, eventdatetime, measure_data) - sensors_8981015 | CREATE INDEX hash_index_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981015 | CREATE INDEX index_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (lower((measureid)::text)) - sensors_8981015 | CREATE INDEX index_with_include_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981015 | CREATE UNIQUE INDEX sensors_pkey_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (measureid, eventdatetime, measure_data) -(12 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - table_with_index_rep_identity_8981011 | CREATE UNIQUE INDEX uqx_8981011 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981011 USING btree (key) - table_with_index_rep_identity_8981012 | CREATE UNIQUE INDEX uqx_8981012 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981012 USING btree (key) - table_with_index_rep_identity_8981021 | CREATE UNIQUE INDEX uqx_8981021 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981021 USING btree (key) -(3 rows) - - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - stxname ---------------------------------------------------------------------- - stats_on_sensors - stats_on_sensors_8981007 - stats_on_sensors_8981008 - stats_on_sensors_8981015 -(4 rows) - --- END : Display current state --- BEGIN: Should be able to 
change/drop constraints -\c - postgres - :master_port -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE test_shard_split_role_nodeferred_drop; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed; -ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200; -DROP STATISTICS stats_on_sensors; -DROP INDEX index_on_sensors_renamed; -ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist; --- END: Should be able to change/drop constraints --- BEGIN: Split second time on another schema -SET search_path TO public; -SET citus.next_shard_id TO 8981031; -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981007, - ARRAY['-2100000000'], - ARRAY[:worker_1_node, :worker_2_node], - 'force_logical'); - citus_split_shard_by_split_points ---------------------------------------------------------------------- - -(1 row) - -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport ---------------------------------------------------------------------- - 8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637 - 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638 - 8981008 | sensors | -1073741823 | -1 | localhost | 57638 - 8981013 | sensors | 0 | 536870911 | localhost | 57637 - 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 - 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 - 8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637 - 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638 - 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 - 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 - 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 - 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 - 8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637 - 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638 - 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 - 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 - 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 - 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 -(18 rows) - --- END: Split second time on another schema --- BEGIN: Create a co-located table with no replica identity. -CREATE TABLE table_no_rep_id (measureid integer); -SELECT create_distributed_table('table_no_rep_id', 'measureid', colocate_with:='sensors'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- END: Create a co-located table with no replica identity. 
--- BEGIN: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity -SET citus.next_shard_id TO 8981041; -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981031, - ARRAY['-2120000000'], - ARRAY[:worker_1_node, :worker_2_node]); -ERROR: cannot use logical replication to transfer shards of the relation table_no_rep_id since it doesn't have a REPLICA IDENTITY or PRIMARY KEY -DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY. -HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'. -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport ---------------------------------------------------------------------- - 8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637 - 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638 - 8981008 | sensors | -1073741823 | -1 | localhost | 57638 - 8981013 | sensors | 0 | 536870911 | localhost | 57637 - 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 - 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 - 8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637 - 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638 - 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 - 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 - 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 - 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 - 8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637 - 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638 - 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 - 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 - 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 - 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 -(18 rows) - --- END: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity --- BEGIN: Drop the co-located table with no replica identity. -DROP TABLE table_no_rep_id; --- END: Drop the co-located table with no replica identity. 
--- BEGIN: Split a shard with shard_transfer_mode='auto' and with all colocated tables has replica identity -SET citus.next_shard_id TO 8981041; -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981031, - ARRAY['-2120000000'], - ARRAY[:worker_1_node, :worker_2_node], - 'auto'); - citus_split_shard_by_split_points ---------------------------------------------------------------------- - -(1 row) - -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport ---------------------------------------------------------------------- - 8981041 | sensors | -2147483648 | -2120000000 | localhost | 57637 - 8981042 | sensors | -2119999999 | -2100000000 | localhost | 57638 - 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638 - 8981008 | sensors | -1073741823 | -1 | localhost | 57638 - 8981013 | sensors | 0 | 536870911 | localhost | 57637 - 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 - 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 - 8981043 | colocated_dist_table | -2147483648 | -2120000000 | localhost | 57637 - 8981044 | colocated_dist_table | -2119999999 | -2100000000 | localhost | 57638 - 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638 - 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 - 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 - 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 - 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 - 8981045 | table_with_index_rep_identity | -2147483648 | -2120000000 | localhost | 57637 - 8981046 | table_with_index_rep_identity | -2119999999 | -2100000000 | localhost | 57638 - 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638 - 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 - 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 - 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 - 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 -(21 rows) - --- END: Split a shard with shard_transfer_mode='auto' and with all colocated tables has replica identity --- BEGIN: Validate Data Count -SELECT COUNT(*) FROM sensors; - count ---------------------------------------------------------------------- - 1001 -(1 row) - -SELECT COUNT(*) FROM reference_table; - count ---------------------------------------------------------------------- - 1001 -(1 row) - -SELECT COUNT(*) FROM colocated_dist_table; - count ---------------------------------------------------------------------- - 1001 -(1 row) - --- END: Validate Data Count ---BEGIN : Cleanup -\c - postgres - :master_port -DROP SCHEMA "citus_split_test_schema_no_deferred_drop" CASCADE; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table citus_split_test_schema_no_deferred_drop.sensors -drop cascades to 
table citus_split_test_schema_no_deferred_drop.reference_table -drop cascades to table citus_split_test_schema_no_deferred_drop.colocated_dist_table -drop cascades to table citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity -SET citus.defer_drop_after_shard_split TO ON; ---END : Cleanup diff --git a/src/test/regress/expected/citus_split_shard_by_split_points.out b/src/test/regress/expected/citus_split_shard_by_split_points.out index 599864aac..737533c4d 100644 --- a/src/test/regress/expected/citus_split_shard_by_split_points.out +++ b/src/test/regress/expected/citus_split_shard_by_split_points.out @@ -207,7 +207,6 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node SET ROLE test_split_role; SET search_path TO "citus_split_test_schema"; SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); citus_move_shard_placement --------------------------------------------------------------------- @@ -215,6 +214,8 @@ SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localho (1 row) -- END : Move one shard before we split it. +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 3 orphaned shards -- BEGIN : Set node id variables SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset @@ -259,6 +260,8 @@ SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localho (1 row) -- END : Move a shard post split. +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 3 orphaned shards -- BEGIN : Display current state. SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport FROM pg_dist_shard AS shard diff --git a/src/test/regress/expected/citus_split_shard_no_deferred_drop.out b/src/test/regress/expected/citus_split_shard_no_deferred_drop.out deleted file mode 100644 index 9bcae7cb0..000000000 --- a/src/test/regress/expected/citus_split_shard_no_deferred_drop.out +++ /dev/null @@ -1,464 +0,0 @@ -/* -This suite runs without deferred drop enabled. -Citus Shard Split Test.The test is model similar to 'shard_move_constraints'. -Here is a high level overview of test plan: - 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table. - 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors. - 3. Create Foreign key constraints between the two co-located distributed tables. - 4. Load data into the three tables. - 5. Move one of the shards for 'sensors' to test ShardMove -> Split. - 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables. - 7. Move one of the split shard to test Split -> ShardMove. - 8. Split an already split shard second time on a different schema. 
-*/ -CREATE SCHEMA "citus_split_test_schema_no_deferred_drop"; -SET citus.defer_drop_after_shard_split TO OFF; -CREATE ROLE test_split_deferred_role WITH LOGIN; -GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema_no_deferred_drop" TO test_split_deferred_role; -SET ROLE test_split_deferred_role; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SET citus.next_shard_id TO 8981000; -SET citus.next_placement_id TO 8610000; -SET citus.shard_count TO 2; -SET citus.shard_replication_factor TO 1; --- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc. -CREATE TABLE sensors( - measureid integer, - eventdatetime date, - measure_data jsonb, - meaure_quantity decimal(15, 2), - measure_status char(1), - measure_comment varchar(44), - PRIMARY KEY (measureid, eventdatetime, measure_data)); -CREATE INDEX index_on_sensors ON sensors(lower(measureid::text)); -ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000; -CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed')); -CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status); -CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors; -SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. --- BEGIN: Create co-located distributed and reference tables. -CREATE TABLE reference_table (measureid integer PRIMARY KEY); -SELECT create_reference_table('reference_table'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); -CLUSTER colocated_dist_table USING colocated_dist_table_pkey; -SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE table_with_index_rep_identity(key int NOT NULL); -CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key); -ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx; -CLUSTER table_with_index_rep_identity USING uqx; -SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- END: Create co-located distributed and reference tables. --- BEGIN : Create Foreign key constraints. -ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); --- END : Create Foreign key constraints. --- BEGIN : Load data into tables. 
-INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; -INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; -INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i; -SELECT COUNT(*) FROM sensors; - count ---------------------------------------------------------------------- - 1001 -(1 row) - -SELECT COUNT(*) FROM reference_table; - count ---------------------------------------------------------------------- - 1001 -(1 row) - -SELECT COUNT(*) FROM colocated_dist_table; - count ---------------------------------------------------------------------- - 1001 -(1 row) - --- END: Load data into tables. --- BEGIN : Display current state. -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport ---------------------------------------------------------------------- - 8981000 | sensors | -2147483648 | -1 | localhost | 57637 - 8981001 | sensors | 0 | 2147483647 | localhost | 57638 - 8981003 | colocated_dist_table | -2147483648 | -1 | localhost | 57637 - 8981004 | colocated_dist_table | 0 | 2147483647 | localhost | 57638 - 8981005 | table_with_index_rep_identity | -2147483648 | -1 | localhost | 57637 - 8981006 | table_with_index_rep_identity | 0 | 2147483647 | localhost | 57638 -(6 rows) - -\c - - - :worker_1_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - relname | Constraint | Definition ---------------------------------------------------------------------- - sensors_8981000 | fkey_table_to_dist_8981000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981003(measureid) -(1 row) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - sensors_8981000 | CREATE INDEX hash_index_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981000 | CREATE INDEX index_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (lower((measureid)::text)) - sensors_8981000 | CREATE INDEX index_with_include_on_sensors_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981000 | CREATE UNIQUE INDEX sensors_pkey_8981000 ON citus_split_test_schema_no_deferred_drop.sensors_8981000 USING btree (measureid, eventdatetime, measure_data) -(4 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - tablename | indexdef 
---------------------------------------------------------------------- - table_with_index_rep_identity_8981005 | CREATE UNIQUE INDEX uqx_8981005 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981005 USING btree (key) -(1 row) - - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - stxname ---------------------------------------------------------------------- - stats_on_sensors - stats_on_sensors_8981000 -(2 rows) - -\c - - - :worker_2_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - relname | Constraint | Definition ---------------------------------------------------------------------- - sensors_8981001 | fkey_table_to_dist_8981001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981004(measureid) -(1 row) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - sensors_8981001 | CREATE INDEX hash_index_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981001 | CREATE INDEX index_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (lower((measureid)::text)) - sensors_8981001 | CREATE INDEX index_with_include_on_sensors_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981001 | CREATE UNIQUE INDEX sensors_pkey_8981001 ON citus_split_test_schema_no_deferred_drop.sensors_8981001 USING btree (measureid, eventdatetime, measure_data) -(4 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - table_with_index_rep_identity_8981006 | CREATE UNIQUE INDEX uqx_8981006 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981006 USING btree (key) -(1 row) - - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - stxname ---------------------------------------------------------------------- - stats_on_sensors - stats_on_sensors_8981001 -(2 rows) - --- END : Display current state --- BEGIN : Move one shard before we split it. -\c - postgres - :master_port -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE test_split_deferred_role; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; -SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); - citus_move_shard_placement ---------------------------------------------------------------------- - -(1 row) - --- END : Move one shard before we split it. 
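A minimal sketch, not part of the patch itself, of the flow that the updated citus_split_shard_by_split_points.out hunk above now expects, assuming the standard regress setup where :worker_1_port and :worker_2_port point at the two worker nodes: with the defer-drop GUCs removed, a shard move always leaves the old placement behind as an orphaned shard, and the drop happens in a separate cleanup step (invoked explicitly in the tests).

    SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port,
                                      'localhost', :worker_2_port,
                                      shard_transfer_mode := 'force_logical');
    -- The source placement is not dropped here; it is left behind as an orphaned shard.
    CALL citus_cleanup_orphaned_shards();
    -- NOTICE: cleaned up 3 orphaned shards

The shard id 8981000 and the NOTICE text are taken from the expected output above; in a different cluster the ids and the cleaned-up counts would differ.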
--- BEGIN : Set node id variables -SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset -SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset --- END : Set node id variables --- BEGIN : Split two shards : One with move and One without move. --- Perform 2 way split -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981000, - ARRAY['-1073741824'], - ARRAY[:worker_1_node, :worker_2_node], - 'block_writes'); - citus_split_shard_by_split_points ---------------------------------------------------------------------- - -(1 row) - --- Perform 3 way split -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981001, - ARRAY['536870911', '1610612735'], - ARRAY[:worker_1_node, :worker_1_node, :worker_2_node], - 'block_writes'); - citus_split_shard_by_split_points ---------------------------------------------------------------------- - -(1 row) - --- END : Split two shards : One with move and One without move. --- BEGIN : Move a shard post split. -SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); - citus_move_shard_placement ---------------------------------------------------------------------- - -(1 row) - --- END : Move a shard post split. --- BEGIN : Display current state. -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport ---------------------------------------------------------------------- - 8981007 | sensors | -2147483648 | -1073741824 | localhost | 57638 - 8981008 | sensors | -1073741823 | -1 | localhost | 57638 - 8981013 | sensors | 0 | 536870911 | localhost | 57637 - 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 - 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 - 8981009 | colocated_dist_table | -2147483648 | -1073741824 | localhost | 57638 - 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 - 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 - 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 - 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 - 8981011 | table_with_index_rep_identity | -2147483648 | -1073741824 | localhost | 57638 - 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 - 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 - 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 - 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 -(15 rows) - -\c - - - :worker_1_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - relname | 
Constraint | Definition ---------------------------------------------------------------------- - sensors_8981013 | fkey_table_to_dist_8981013 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981016(measureid) - sensors_8981014 | fkey_table_to_dist_8981014 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981017(measureid) -(2 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - sensors_8981013 | CREATE INDEX hash_index_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981013 | CREATE INDEX index_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (lower((measureid)::text)) - sensors_8981013 | CREATE INDEX index_with_include_on_sensors_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981013 | CREATE UNIQUE INDEX sensors_pkey_8981013 ON citus_split_test_schema_no_deferred_drop.sensors_8981013 USING btree (measureid, eventdatetime, measure_data) - sensors_8981014 | CREATE INDEX hash_index_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981014 | CREATE INDEX index_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (lower((measureid)::text)) - sensors_8981014 | CREATE INDEX index_with_include_on_sensors_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981014 | CREATE UNIQUE INDEX sensors_pkey_8981014 ON citus_split_test_schema_no_deferred_drop.sensors_8981014 USING btree (measureid, eventdatetime, measure_data) -(8 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - table_with_index_rep_identity_8981019 | CREATE UNIQUE INDEX uqx_8981019 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981019 USING btree (key) - table_with_index_rep_identity_8981020 | CREATE UNIQUE INDEX uqx_8981020 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981020 USING btree (key) -(2 rows) - - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - stxname ---------------------------------------------------------------------- - stats_on_sensors - stats_on_sensors_8981013 - stats_on_sensors_8981014 -(3 rows) - -\c - - - :worker_2_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - relname | Constraint | Definition ---------------------------------------------------------------------- - sensors_8981007 | fkey_table_to_dist_8981007 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981009(measureid) - 
sensors_8981008 | fkey_table_to_dist_8981008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981010(measureid) - sensors_8981015 | fkey_table_to_dist_8981015 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981018(measureid) -(3 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - sensors_8981007 | CREATE INDEX hash_index_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981007 | CREATE INDEX index_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (lower((measureid)::text)) - sensors_8981007 | CREATE INDEX index_with_include_on_sensors_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981007 | CREATE UNIQUE INDEX sensors_pkey_8981007 ON citus_split_test_schema_no_deferred_drop.sensors_8981007 USING btree (measureid, eventdatetime, measure_data) - sensors_8981008 | CREATE INDEX hash_index_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981008 | CREATE INDEX index_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (lower((measureid)::text)) - sensors_8981008 | CREATE INDEX index_with_include_on_sensors_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981008 | CREATE UNIQUE INDEX sensors_pkey_8981008 ON citus_split_test_schema_no_deferred_drop.sensors_8981008 USING btree (measureid, eventdatetime, measure_data) - sensors_8981015 | CREATE INDEX hash_index_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING hash (((measure_data -> 'IsFailed'::text))) - sensors_8981015 | CREATE INDEX index_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (lower((measureid)::text)) - sensors_8981015 | CREATE INDEX index_with_include_on_sensors_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) - sensors_8981015 | CREATE UNIQUE INDEX sensors_pkey_8981015 ON citus_split_test_schema_no_deferred_drop.sensors_8981015 USING btree (measureid, eventdatetime, measure_data) -(12 rows) - - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - tablename | indexdef ---------------------------------------------------------------------- - table_with_index_rep_identity_8981011 | CREATE UNIQUE INDEX uqx_8981011 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981011 USING btree (key) - table_with_index_rep_identity_8981012 | CREATE UNIQUE INDEX uqx_8981012 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981012 USING btree (key) - table_with_index_rep_identity_8981021 | CREATE UNIQUE INDEX uqx_8981021 ON citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity_8981021 USING btree (key) -(3 rows) - - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN 
('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - stxname ---------------------------------------------------------------------- - stats_on_sensors - stats_on_sensors_8981007 - stats_on_sensors_8981008 - stats_on_sensors_8981015 -(4 rows) - --- END : Display current state --- BEGIN: Should be able to change/drop constraints -\c - postgres - :master_port -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE test_split_deferred_role; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed; -ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200; -DROP STATISTICS stats_on_sensors; -DROP INDEX index_on_sensors_renamed; -ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist; --- END: Should be able to change/drop constraints --- BEGIN: Split second time on another schema -SET search_path TO public; -SET citus.next_shard_id TO 8981031; -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981007, - ARRAY['-2100000000'], - ARRAY[:worker_1_node, :worker_2_node], - 'block_writes'); - citus_split_shard_by_split_points ---------------------------------------------------------------------- - -(1 row) - -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport ---------------------------------------------------------------------- - 8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637 - 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638 - 8981008 | sensors | -1073741823 | -1 | localhost | 57638 - 8981013 | sensors | 0 | 536870911 | localhost | 57637 - 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 - 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 - 8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637 - 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638 - 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 - 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 - 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 - 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 - 8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637 - 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638 - 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 - 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 - 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 - 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 -(18 rows) - --- END: Split second time on another schema --- BEGIN: Validate Data Count -SELECT COUNT(*) FROM sensors; - count 
---------------------------------------------------------------------- - 1001 -(1 row) - -SELECT COUNT(*) FROM reference_table; - count ---------------------------------------------------------------------- - 1001 -(1 row) - -SELECT COUNT(*) FROM colocated_dist_table; - count ---------------------------------------------------------------------- - 1001 -(1 row) - --- END: Validate Data Count ---BEGIN : Cleanup -\c - postgres - :master_port -DROP SCHEMA "citus_split_test_schema_no_deferred_drop" CASCADE; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table citus_split_test_schema_no_deferred_drop.sensors -drop cascades to table citus_split_test_schema_no_deferred_drop.reference_table -drop cascades to table citus_split_test_schema_no_deferred_drop.colocated_dist_table -drop cascades to table citus_split_test_schema_no_deferred_drop.table_with_index_rep_identity -SET citus.defer_drop_after_shard_split TO ON; ---END : Cleanup diff --git a/src/test/regress/expected/failure_tenant_isolation.out b/src/test/regress/expected/failure_tenant_isolation.out index 7c604bd31..6be4580be 100644 --- a/src/test/regress/expected/failure_tenant_isolation.out +++ b/src/test/regress/expected/failure_tenant_isolation.out @@ -159,48 +159,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); ERROR: canceling statement due to user request --- failure on dropping old colocated shard --- Disable deferred drop otherwise we will skip the drop and operation will succeed instead of failing. -SET citus.defer_drop_after_shard_split TO OFF; -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx --- cancellation on dropping old colocated shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: canceling statement due to user request --- failure on dropping old shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx --- cancellation on dropping old shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: canceling statement due to user request --- Re-enable deferred drop for rest of the tests. 
-SET citus.defer_drop_after_shard_split TO ON; -- failure on foreign key creation SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()'); mitmproxy diff --git a/src/test/regress/expected/failure_tenant_isolation_nonblocking.out b/src/test/regress/expected/failure_tenant_isolation_nonblocking.out index c166a41af..b36ce1f08 100644 --- a/src/test/regress/expected/failure_tenant_isolation_nonblocking.out +++ b/src/test/regress/expected/failure_tenant_isolation_nonblocking.out @@ -311,49 +311,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").ca SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); ERROR: canceling statement due to user request --- failure on dropping old shard --- failure on dropping old colocated shard --- Disable deferred drop otherwise we will skip the drop and operation will succeed instead of failing. -SET citus.defer_drop_after_shard_split TO OFF; -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); -ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx --- cancellation on dropping old shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); -ERROR: canceling statement due to user request --- failure on dropping old colocated shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); -ERROR: connection not open -CONTEXT: while executing command on localhost:xxxxx --- cancellation on dropping old colocated shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); -ERROR: canceling statement due to user request --- Re-enable deferred drop for rest of the tests. 
-SET citus.defer_drop_after_shard_split TO ON; -- failure on foreign key creation SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()'); mitmproxy diff --git a/src/test/regress/expected/isolation_logical_replication_multi_shard_commands.out b/src/test/regress/expected/isolation_logical_replication_multi_shard_commands.out index 194862301..efbc82e66 100644 --- a/src/test/regress/expected/isolation_logical_replication_multi_shard_commands.out +++ b/src/test/regress/expected/isolation_logical_replication_multi_shard_commands.out @@ -10,10 +10,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-insert: INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); @@ -33,7 +33,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -63,10 +63,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-upsert: INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); @@ -87,7 +87,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -120,10 +120,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-update: UPDATE logical_replicate_placement SET y = y + 1; @@ -143,7 +143,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -176,10 +176,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-delete: DELETE FROM logical_replicate_placement; @@ -199,7 +199,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -230,10 +230,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-select: SELECT * FROM logical_replicate_placement ORDER BY y; @@ -259,7 +259,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step 
s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -280,13 +280,13 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-copy: - COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV; + COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV; step s3-release-advisory-lock: SELECT pg_advisory_unlock(44000, 55152); @@ -303,7 +303,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -332,10 +332,10 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; master_move_shard_placement --------------------------------------------------------------------- @@ -343,10 +343,10 @@ master_move_shard_placement (1 row) step s2-truncate: - TRUNCATE logical_replicate_placement; + TRUNCATE logical_replicate_placement; step s1-end: - COMMIT; + COMMIT; step s2-truncate: <... completed> step s1-select: @@ -375,13 +375,13 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-alter-table: - ALTER TABLE logical_replicate_placement ADD COLUMN z INT; + ALTER TABLE logical_replicate_placement ADD COLUMN z INT; step s3-release-advisory-lock: SELECT pg_advisory_unlock(44000, 55152); @@ -398,7 +398,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s2-alter-table: <... completed> step s1-select: @@ -419,19 +419,19 @@ nodeport starting permutation: s1-begin s2-begin s2-insert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution step s1-begin: - BEGIN; + BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-insert: INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... 
completed> master_move_shard_placement @@ -440,7 +440,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -462,20 +462,20 @@ nodeport starting permutation: s1-begin s2-begin s2-upsert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution step s1-begin: - BEGIN; + BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-upsert: INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -484,7 +484,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -509,19 +509,19 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); step s1-begin: - BEGIN; + BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-update: UPDATE logical_replicate_placement SET y = y + 1; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -530,7 +530,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -555,19 +555,19 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); step s1-begin: - BEGIN; + BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-delete: DELETE FROM logical_replicate_placement; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... 
completed> master_move_shard_placement @@ -576,7 +576,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -599,10 +599,10 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); step s1-begin: - BEGIN; + BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-select: SELECT * FROM logical_replicate_placement ORDER BY y; @@ -614,7 +614,7 @@ step s2-select: (2 rows) step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; master_move_shard_placement --------------------------------------------------------------------- @@ -622,10 +622,10 @@ master_move_shard_placement (1 row) step s2-end: - COMMIT; + COMMIT; step s1-end: - COMMIT; + COMMIT; step s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -638,19 +638,19 @@ nodeport starting permutation: s1-begin s2-begin s2-copy s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution step s1-begin: - BEGIN; + BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-copy: - COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV; + COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -659,7 +659,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -688,19 +688,19 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); step s1-begin: - BEGIN; + BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-truncate: - TRUNCATE logical_replicate_placement; + TRUNCATE logical_replicate_placement; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... 
completed> master_move_shard_placement @@ -709,7 +709,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -729,19 +729,19 @@ nodeport starting permutation: s1-begin s2-begin s2-alter-table s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution step s1-begin: - BEGIN; + BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-alter-table: - ALTER TABLE logical_replicate_placement ADD COLUMN z INT; + ALTER TABLE logical_replicate_placement ADD COLUMN z INT; step s1-move-placement: - SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -750,7 +750,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; diff --git a/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out b/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out index 586e700c7..cbb5e3d8d 100644 --- a/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out +++ b/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out @@ -10,10 +10,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); @@ -33,7 +33,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -62,10 +62,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-upsert: INSERT INTO logical_replicate_placement VALUES (15, 15); @@ -86,7 +86,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -118,10 +118,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-update: UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15; @@ -141,7 +141,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -173,10 +173,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-delete: DELETE FROM logical_replicate_placement 
WHERE x = 15; @@ -196,7 +196,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -227,10 +227,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-select: SELECT * FROM logical_replicate_placement ORDER BY y; @@ -255,7 +255,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -279,10 +279,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-select-for-update: SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE; @@ -307,7 +307,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -320,7 +320,7 @@ nodeport starting permutation: s1-begin s2-begin s2-insert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -329,10 +329,10 @@ step s2-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -341,7 +341,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -362,7 +362,7 @@ nodeport starting permutation: s1-begin s2-begin s2-upsert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -372,10 +372,10 @@ step s2-upsert: INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... 
completed> master_move_shard_placement @@ -384,7 +384,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -408,7 +408,7 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -417,10 +417,10 @@ step s2-update: UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -429,7 +429,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -453,7 +453,7 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -462,10 +462,10 @@ step s2-delete: DELETE FROM logical_replicate_placement WHERE x = 15; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -474,7 +474,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -497,7 +497,7 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -511,7 +511,7 @@ step s2-select: (1 row) step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_move_shard_placement --------------------------------------------------------------------- @@ -519,10 +519,10 @@ master_move_shard_placement (1 row) step s2-end: - COMMIT; + COMMIT; step s1-end: - COMMIT; + COMMIT; step s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -538,7 +538,7 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -552,10 +552,10 @@ step s2-select-for-update: (1 row) step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... 
completed> master_move_shard_placement @@ -564,7 +564,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -577,13 +577,13 @@ nodeport starting permutation: s1-begin s2-begin s1-move-placement s2-move-placement s1-end s2-end step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_move_shard_placement --------------------------------------------------------------------- @@ -591,14 +591,14 @@ master_move_shard_placement (1 row) step s2-move-placement: - SELECT master_move_shard_placement( - get_shard_id_for_distribution_column('logical_replicate_placement', 4), - 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement( + get_shard_id_for_distribution_column('logical_replicate_placement', 4), + 'localhost', 57637, 'localhost', 57638); ERROR: could not acquire the lock required to move public.logical_replicate_placement step s1-end: - COMMIT; + COMMIT; step s2-end: - COMMIT; + COMMIT; diff --git a/src/test/regress/expected/isolation_rebalancer_deferred_drop.out b/src/test/regress/expected/isolation_rebalancer_deferred_drop.out index 169138143..eb8659d72 100644 --- a/src/test/regress/expected/isolation_rebalancer_deferred_drop.out +++ b/src/test/regress/expected/isolation_rebalancer_deferred_drop.out @@ -96,7 +96,7 @@ step s1-commit: COMMIT; step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -160,7 +160,7 @@ step s1-commit: COMMIT; step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -187,35 +187,3 @@ run_try_drop_marked_shards step s1-commit: COMMIT; - -starting permutation: s1-begin s2-begin s2-select s1-move-placement-without-deferred s2-commit s1-commit -step s1-begin: - BEGIN; - -step s2-begin: - BEGIN; - -step s2-select: - SELECT COUNT(*) FROM t1; - -count ---------------------------------------------------------------------- - 0 -(1 row) - -step s1-move-placement-without-deferred: - SET citus.defer_drop_after_shard_move TO OFF; - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -step s2-commit: - COMMIT; - -step s1-move-placement-without-deferred: <... 
completed> -master_move_shard_placement ---------------------------------------------------------------------- - -(1 row) - -step s1-commit: - COMMIT; - diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index ee842efc8..9ae2b07e6 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1560,7 +1560,6 @@ CREATE EXTENSION citus; CREATE TABLE test (x int, y int); INSERT INTO test VALUES (1,2); SET citus.shard_replication_factor TO 1; -SET citus.defer_drop_after_shard_split TO off; SELECT create_distributed_table_concurrently('test','x'); NOTICE: relation test does not have a REPLICA IDENTITY or PRIMARY KEY DETAIL: UPDATE and DELETE commands on the relation will error out during create_distributed_table_concurrently unless there is a REPLICA IDENTITY or PRIMARY KEY. INSERT commands will still work. diff --git a/src/test/regress/expected/multi_tenant_isolation.out b/src/test/regress/expected/multi_tenant_isolation.out index 021b43972..03e4f9a2f 100644 --- a/src/test/regress/expected/multi_tenant_isolation.out +++ b/src/test/regress/expected/multi_tenant_isolation.out @@ -798,84 +798,6 @@ $$; RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER abort_drop ON sql_drop EXECUTE PROCEDURE abort_drop_command(); -\c - postgres - :master_port --- Disable deferred drop otherwise we will skip the drop and operation will succeed instead of failing. -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE mx_isolation_role_ent; -SET search_path to "Tenant Isolation"; -\set VERBOSITY terse -SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'block_writes'); -WARNING: command DROP TABLE is disabled -WARNING: command DROP TABLE is disabled -WARNING: command DROP TABLE is disabled -WARNING: command DROP TABLE is disabled -WARNING: command DROP TABLE is disabled -WARNING: command DROP TABLE is disabled -WARNING: failed to clean up 6 orphaned shards out of 6 after a isolate_tenant_to_new_shard operation failed -ERROR: command DROP TABLE is disabled -\set VERBOSITY default --- check if metadata is changed -SELECT * FROM pg_dist_shard - WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass - ORDER BY shardminvalue::BIGINT, logicalrelid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- - lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 - orders_streaming | 1230042 | t | -2147483648 | -2147483648 - lineitem_streaming | 1230041 | t | -2147483647 | -136164586 - orders_streaming | 1230043 | t | -2147483647 | -136164586 - lineitem_streaming | 1230035 | t | -136164585 | -136164585 - orders_streaming | 1230038 | t | -136164585 | -136164585 - lineitem_streaming | 1230036 | t | -136164584 | -85071815 - orders_streaming | 1230039 | t | -136164584 | -85071815 - lineitem_streaming | 1230011 | t | -85071814 | -85071814 - orders_streaming | 1230014 | t | -85071814 | -85071814 - lineitem_streaming | 1230012 | t | -85071813 | -1 - orders_streaming | 1230015 | t | -85071813 | -1 - lineitem_streaming | 1230004 | t | 0 | 108199380 - orders_streaming | 1230007 | t | 0 | 108199380 - lineitem_streaming | 1230005 | t | 108199381 | 108199381 - orders_streaming | 1230008 | t | 108199381 | 108199381 - lineitem_streaming | 1230028 | t | 108199382 | 412880111 - orders_streaming | 1230031 | t | 108199382 | 412880111 - lineitem_streaming 
| 1230029 | t | 412880112 | 412880112 - orders_streaming | 1230032 | t | 412880112 | 412880112 - lineitem_streaming | 1230044 | t | 412880113 | 2147483646 - orders_streaming | 1230046 | t | 412880113 | 2147483646 - lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 - orders_streaming | 1230047 | t | 2147483647 | 2147483647 -(24 rows) - -\c - - - :worker_1_port -SET search_path to "Tenant Isolation"; --- however, new tables are already created -SET citus.override_table_visibility TO false; -\d - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - Tenant Isolation | lineitem_streaming | table | mx_isolation_role_ent - Tenant Isolation | lineitem_streaming_1230011 | table | mx_isolation_role_ent - Tenant Isolation | lineitem_streaming_1230012 | table | mx_isolation_role_ent - Tenant Isolation | lineitem_streaming_1230035 | table | mx_isolation_role_ent - Tenant Isolation | lineitem_streaming_1230036 | table | mx_isolation_role_ent - Tenant Isolation | lineitem_streaming_1230040 | table | mx_isolation_role_ent - Tenant Isolation | lineitem_streaming_1230041 | table | mx_isolation_role_ent - Tenant Isolation | lineitem_streaming_1230056 | table | mx_isolation_role_ent - Tenant Isolation | lineitem_streaming_1230057 | table | mx_isolation_role_ent - Tenant Isolation | lineitem_streaming_1230058 | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming_1230014 | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming_1230015 | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming_1230038 | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming_1230039 | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming_1230042 | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming_1230043 | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming_1230059 | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming_1230060 | table | mx_isolation_role_ent - Tenant Isolation | orders_streaming_1230061 | table | mx_isolation_role_ent -(20 rows) - \c - postgres - :worker_1_port DROP EVENT TRIGGER abort_drop; \c - mx_isolation_role_ent - :master_port @@ -1085,7 +1007,7 @@ INSERT INTO test_colocated_table_3 SELECT i, i FROM generate_series (0, 100) i; SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE', shard_transfer_mode => 'block_writes'); isolate_tenant_to_new_shard --------------------------------------------------------------------- - 1230095 + 1230089 (1 row) SELECT count(*) FROM test_colocated_table_2; @@ -1096,7 +1018,7 @@ SELECT count(*) FROM test_colocated_table_2; \c - postgres - :master_port CALL pg_catalog.citus_cleanup_orphaned_resources(); -NOTICE: cleaned up 10 orphaned resources +NOTICE: cleaned up 4 orphaned resources \c - postgres - :worker_1_port -- show the foreign keys of the main table & its colocated shard on other tables SELECT tbl.relname, fk."Constraint", fk."Definition" @@ -1107,47 +1029,47 @@ ORDER BY 1, 2; relname | Constraint | Definition --------------------------------------------------------------------- test_colocated_table_1 | test_colocated_table_1_id_fkey | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1(id) - test_colocated_table_1_1230069 | test_colocated_table_1_id_fkey_1230069 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230069(id) - 
test_colocated_table_1_1230071 | test_colocated_table_1_id_fkey_1230071 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230071(id) - test_colocated_table_1_1230073 | test_colocated_table_1_id_fkey_1230073 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230073(id) - test_colocated_table_1_1230091 | test_colocated_table_1_id_fkey_1230091 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230091(id) - test_colocated_table_1_1230092 | test_colocated_table_1_id_fkey_1230092 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230092(id) - test_colocated_table_1_1230093 | test_colocated_table_1_id_fkey_1230093 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230093(id) + test_colocated_table_1_1230063 | test_colocated_table_1_id_fkey_1230063 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230063(id) + test_colocated_table_1_1230065 | test_colocated_table_1_id_fkey_1230065 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230065(id) + test_colocated_table_1_1230067 | test_colocated_table_1_id_fkey_1230067 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230067(id) + test_colocated_table_1_1230085 | test_colocated_table_1_id_fkey_1230085 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230085(id) + test_colocated_table_1_1230086 | test_colocated_table_1_id_fkey_1230086 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230086(id) + test_colocated_table_1_1230087 | test_colocated_table_1_id_fkey_1230087 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230087(id) test_colocated_table_2 | test_colocated_table_2_id_fkey | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1(id) test_colocated_table_2 | test_colocated_table_2_value_1_fkey | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey(id) - test_colocated_table_2_1230077 | test_colocated_table_2_id_fkey_1230077 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230069(id) - test_colocated_table_2_1230077 | test_colocated_table_2_value_1_fkey_1230077 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_2_1230079 | test_colocated_table_2_id_fkey_1230079 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230071(id) - test_colocated_table_2_1230079 | test_colocated_table_2_value_1_fkey_1230079 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_2_1230081 | test_colocated_table_2_id_fkey_1230081 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230073(id) - test_colocated_table_2_1230081 | test_colocated_table_2_value_1_fkey_1230081 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_2_1230094 | test_colocated_table_2_id_fkey_1230094 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230091(id) - test_colocated_table_2_1230094 | test_colocated_table_2_value_1_fkey_1230094 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_2_1230095 | test_colocated_table_2_id_fkey_1230095 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230092(id) - test_colocated_table_2_1230095 | 
test_colocated_table_2_value_1_fkey_1230095 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_2_1230096 | test_colocated_table_2_id_fkey_1230096 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230093(id) - test_colocated_table_2_1230096 | test_colocated_table_2_value_1_fkey_1230096 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) + test_colocated_table_2_1230071 | test_colocated_table_2_id_fkey_1230071 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230063(id) + test_colocated_table_2_1230071 | test_colocated_table_2_value_1_fkey_1230071 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_2_1230073 | test_colocated_table_2_id_fkey_1230073 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230065(id) + test_colocated_table_2_1230073 | test_colocated_table_2_value_1_fkey_1230073 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_2_1230075 | test_colocated_table_2_id_fkey_1230075 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230067(id) + test_colocated_table_2_1230075 | test_colocated_table_2_value_1_fkey_1230075 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_2_1230088 | test_colocated_table_2_id_fkey_1230088 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230085(id) + test_colocated_table_2_1230088 | test_colocated_table_2_value_1_fkey_1230088 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_2_1230089 | test_colocated_table_2_id_fkey_1230089 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230086(id) + test_colocated_table_2_1230089 | test_colocated_table_2_value_1_fkey_1230089 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_2_1230090 | test_colocated_table_2_id_fkey_1230090 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230087(id) + test_colocated_table_2_1230090 | test_colocated_table_2_value_1_fkey_1230090 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) test_colocated_table_3 | test_colocated_table_3_id_fkey | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1(id) test_colocated_table_3 | test_colocated_table_3_id_fkey1 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2(id) test_colocated_table_3 | test_colocated_table_3_value_1_fkey | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey(id) - test_colocated_table_3_1230085 | test_colocated_table_3_id_fkey1_1230085 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230077(id) - test_colocated_table_3_1230085 | test_colocated_table_3_id_fkey_1230085 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230069(id) - test_colocated_table_3_1230085 | test_colocated_table_3_value_1_fkey_1230085 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_3_1230087 | test_colocated_table_3_id_fkey1_1230087 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230079(id) - test_colocated_table_3_1230087 | 
test_colocated_table_3_id_fkey_1230087 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230071(id) - test_colocated_table_3_1230087 | test_colocated_table_3_value_1_fkey_1230087 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_3_1230089 | test_colocated_table_3_id_fkey1_1230089 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230081(id) - test_colocated_table_3_1230089 | test_colocated_table_3_id_fkey_1230089 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230073(id) - test_colocated_table_3_1230089 | test_colocated_table_3_value_1_fkey_1230089 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_3_1230097 | test_colocated_table_3_id_fkey1_1230097 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230094(id) - test_colocated_table_3_1230097 | test_colocated_table_3_id_fkey_1230097 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230091(id) - test_colocated_table_3_1230097 | test_colocated_table_3_value_1_fkey_1230097 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_3_1230098 | test_colocated_table_3_id_fkey1_1230098 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230095(id) - test_colocated_table_3_1230098 | test_colocated_table_3_id_fkey_1230098 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230092(id) - test_colocated_table_3_1230098 | test_colocated_table_3_value_1_fkey_1230098 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) - test_colocated_table_3_1230099 | test_colocated_table_3_id_fkey1_1230099 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230096(id) - test_colocated_table_3_1230099 | test_colocated_table_3_id_fkey_1230099 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230093(id) - test_colocated_table_3_1230099 | test_colocated_table_3_value_1_fkey_1230099 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230066(id) + test_colocated_table_3_1230079 | test_colocated_table_3_id_fkey1_1230079 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230071(id) + test_colocated_table_3_1230079 | test_colocated_table_3_id_fkey_1230079 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230063(id) + test_colocated_table_3_1230079 | test_colocated_table_3_value_1_fkey_1230079 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_3_1230081 | test_colocated_table_3_id_fkey1_1230081 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230073(id) + test_colocated_table_3_1230081 | test_colocated_table_3_id_fkey_1230081 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230065(id) + test_colocated_table_3_1230081 | test_colocated_table_3_value_1_fkey_1230081 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_3_1230083 | test_colocated_table_3_id_fkey1_1230083 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230075(id) + test_colocated_table_3_1230083 | test_colocated_table_3_id_fkey_1230083 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230067(id) + 
test_colocated_table_3_1230083 | test_colocated_table_3_value_1_fkey_1230083 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_3_1230091 | test_colocated_table_3_id_fkey1_1230091 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230088(id) + test_colocated_table_3_1230091 | test_colocated_table_3_id_fkey_1230091 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230085(id) + test_colocated_table_3_1230091 | test_colocated_table_3_value_1_fkey_1230091 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_3_1230092 | test_colocated_table_3_id_fkey1_1230092 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230089(id) + test_colocated_table_3_1230092 | test_colocated_table_3_id_fkey_1230092 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230086(id) + test_colocated_table_3_1230092 | test_colocated_table_3_value_1_fkey_1230092 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) + test_colocated_table_3_1230093 | test_colocated_table_3_id_fkey1_1230093 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230090(id) + test_colocated_table_3_1230093 | test_colocated_table_3_id_fkey_1230093 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230087(id) + test_colocated_table_3_1230093 | test_colocated_table_3_value_1_fkey_1230093 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230060(id) (42 rows) \c - mx_isolation_role_ent - :master_port diff --git a/src/test/regress/expected/multi_transaction_recovery.out b/src/test/regress/expected/multi_transaction_recovery.out index 1e4cea224..509a87acf 100644 --- a/src/test/regress/expected/multi_transaction_recovery.out +++ b/src/test/regress/expected/multi_transaction_recovery.out @@ -363,7 +363,6 @@ SELECT COUNT(*) FROM pg_dist_transaction; (1 row) BEGIN; -SET LOCAL citus.defer_drop_after_shard_move TO OFF; SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode := 'block_writes'); citus_move_shard_placement --------------------------------------------------------------------- diff --git a/src/test/regress/expected/shard_move_deferred_delete.out b/src/test/regress/expected/shard_move_deferred_delete.out index ea85f9bfd..d8be7068d 100644 --- a/src/test/regress/expected/shard_move_deferred_delete.out +++ b/src/test/regress/expected/shard_move_deferred_delete.out @@ -4,7 +4,6 @@ SET citus.next_shard_id TO 20000000; SET citus.shard_count TO 6; SET citus.shard_replication_factor TO 1; -SET citus.defer_drop_after_shard_move TO on; CREATE SCHEMA shard_move_deferred_delete; SET search_path TO shard_move_deferred_delete; CREATE TABLE t1 ( id int PRIMARY KEY); diff --git a/src/test/regress/spec/isolation_rebalancer_deferred_drop.spec b/src/test/regress/spec/isolation_rebalancer_deferred_drop.spec index 7de6d69cc..f27378fe5 100644 --- a/src/test/regress/spec/isolation_rebalancer_deferred_drop.spec +++ b/src/test/regress/spec/isolation_rebalancer_deferred_drop.spec @@ -65,12 +65,6 @@ step "s1-move-placement-back" SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637); } -step "s1-move-placement-without-deferred" { - SET citus.defer_drop_after_shard_move TO OFF; - SELECT 
master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -} - step "s1-drop-marked-shards" { SET client_min_messages to NOTICE; @@ -88,10 +82,6 @@ step "s1-commit" session "s2" -step "s2-begin" { - BEGIN; -} - step "s2-drop-old-shards" { SELECT run_try_drop_marked_shards(); } @@ -112,20 +102,12 @@ step "s2-lock-table-on-worker" SELECT run_commands_on_session_level_connection_to_node('LOCK TABLE t1_120000'); } -step "s2-select" { - SELECT COUNT(*) FROM t1; -} - step "s2-drop-marked-shards" { SET client_min_messages to DEBUG1; CALL isolation_cleanup_orphaned_shards(); } -step "s2-commit" { - COMMIT; -} - permutation "s1-begin" "s1-move-placement" "s1-drop-marked-shards" "s2-drop-marked-shards" "s1-commit" permutation "s1-begin" "s1-move-placement" "s2-drop-marked-shards" "s1-drop-marked-shards" "s1-commit" @@ -134,4 +116,3 @@ permutation "s1-begin" "s1-move-placement" "s2-start-session-level-connection" " permutation "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-lock-table-on-worker" "s1-commit" "s1-begin" "s1-move-placement-back" "s1-commit" "s2-stop-connection" // make sure we error if we cannot get the lock on pg_dist_placement permutation "s1-begin" "s1-lock-pg-dist-placement" "s2-drop-old-shards" "s1-commit" -permutation "s1-begin" "s2-begin" "s2-select" "s1-move-placement-without-deferred" "s2-commit" "s1-commit" diff --git a/src/test/regress/split_schedule b/src/test/regress/split_schedule index 716e9dd08..62ba469bf 100644 --- a/src/test/regress/split_schedule +++ b/src/test/regress/split_schedule @@ -17,12 +17,10 @@ test: worker_split_binary_copy_test test: worker_split_text_copy_test test: citus_split_shard_by_split_points_negative test: citus_split_shard_by_split_points -test: citus_split_shard_no_deferred_drop test: citus_split_shard_by_split_points_failure # Name citus_split_shard_by_split_points_columnar_partitioned was too long and being truncated. # use citus_split_shard_columnar_partitioned instead. 
test: citus_split_shard_columnar_partitioned test: citus_non_blocking_split_shards -test: citus_non_blocking_splits_no_deferred_drop test: citus_non_blocking_split_shard_cleanup test: citus_non_blocking_split_columnar diff --git a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql index 334c07b1a..5fda60d62 100644 --- a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql +++ b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql @@ -32,7 +32,6 @@ INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FR \c - postgres - :master_port SET search_path TO "citus_split_test_schema"; SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; -- BEGIN : Set node id variables SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset diff --git a/src/test/regress/sql/citus_non_blocking_split_shards.sql b/src/test/regress/sql/citus_non_blocking_split_shards.sql index 00b5b71cb..a387b07c5 100644 --- a/src/test/regress/sql/citus_non_blocking_split_shards.sql +++ b/src/test/regress/sql/citus_non_blocking_split_shards.sql @@ -129,11 +129,12 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node SET ROLE test_shard_split_role; SET search_path TO "citus_split_test_schema"; SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); -- END : Move one shard before we split it. +CALL pg_catalog.citus_cleanup_orphaned_shards(); + -- BEGIN : Set node id variables SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset @@ -167,6 +168,8 @@ CALL pg_catalog.citus_cleanup_orphaned_resources(); SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); -- END : Move a shard post split. +CALL pg_catalog.citus_cleanup_orphaned_shards(); + -- BEGIN : Display current state. SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport FROM pg_dist_shard AS shard diff --git a/src/test/regress/sql/citus_non_blocking_splits_no_deferred_drop.sql b/src/test/regress/sql/citus_non_blocking_splits_no_deferred_drop.sql deleted file mode 100644 index b409aa9f4..000000000 --- a/src/test/regress/sql/citus_non_blocking_splits_no_deferred_drop.sql +++ /dev/null @@ -1,294 +0,0 @@ -/* -This suite runs without deferred drop enabled. - -Citus Shard Split Test.The test is model similar to 'shard_move_constraints'. -Here is a high level overview of test plan: - 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table. - 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors. - 3. Create Foreign key constraints between the two co-located distributed tables. - 4. Load data into the three tables. - 5. Move one of the shards for 'sensors' to test ShardMove -> Split. - 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables. - 7. Move one of the split shard to test Split -> ShardMove. - 8. Split an already split shard second time on a different schema. - 9. Create a colocated table with no replica identity. - 10. 
Show we do not allow Split with the shard transfer mode 'auto' if any colocated table has no replica identity. - 11. Drop the colocated table with no replica identity. - 12. Show we allow Split with the shard transfer mode 'auto' if all colocated tables has replica identity. -*/ - -CREATE SCHEMA "citus_split_test_schema_no_deferred_drop"; - -SET citus.defer_drop_after_shard_split TO OFF; - -CREATE ROLE test_shard_split_role_nodeferred_drop WITH LOGIN; -GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema_no_deferred_drop" TO test_shard_split_role_nodeferred_drop; -SET ROLE test_shard_split_role_nodeferred_drop; - -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SET citus.next_shard_id TO 8981000; -SET citus.next_placement_id TO 8610000; -SET citus.shard_count TO 2; -SET citus.shard_replication_factor TO 1; - --- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc. -CREATE TABLE sensors( - measureid integer, - eventdatetime date, - measure_data jsonb, - meaure_quantity decimal(15, 2), - measure_status char(1), - measure_comment varchar(44), - PRIMARY KEY (measureid, eventdatetime, measure_data)); - -CREATE INDEX index_on_sensors ON sensors(lower(measureid::text)); -ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000; -CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed')); -CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status); -CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors; - -SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); --- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. - --- BEGIN: Create co-located distributed and reference tables. -CREATE TABLE reference_table (measureid integer PRIMARY KEY); -SELECT create_reference_table('reference_table'); - -CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); -CLUSTER colocated_dist_table USING colocated_dist_table_pkey; -SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); - -CREATE TABLE table_with_index_rep_identity(key int NOT NULL); -CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key); -ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx; -CLUSTER table_with_index_rep_identity USING uqx; -SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors'); --- END: Create co-located distributed and reference tables. - --- BEGIN : Create Foreign key constraints. -ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); --- END : Create Foreign key constraints. - --- BEGIN : Load data into tables. -INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; -INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; -INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i; - -SELECT COUNT(*) FROM sensors; -SELECT COUNT(*) FROM reference_table; -SELECT COUNT(*) FROM colocated_dist_table; --- END: Load data into tables. - --- BEGIN : Display current state. 
-SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - -\c - - - :worker_1_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - -\c - - - :worker_2_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; --- END : Display current state - --- BEGIN : Move one shard before we split it. -\c - postgres - :master_port -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE test_shard_split_role_nodeferred_drop; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; - -SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); --- END : Move one shard before we split it. - --- BEGIN : Set node id variables -SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset -SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset --- END : Set node id variables - --- BEGIN : Split two shards : One with move and One without move. --- Perform 2 way split -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981000, - ARRAY['-1073741824'], - ARRAY[:worker_1_node, :worker_2_node], - 'force_logical'); - --- Perform 3 way split -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981001, - ARRAY['536870911', '1610612735'], - ARRAY[:worker_1_node, :worker_1_node, :worker_2_node], - 'force_logical'); --- END : Split two shards : One with move and One without move. - --- BEGIN : Move a shard post split. 
-SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); --- END : Move a shard post split. - --- BEGIN : Display current state. -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - -\c - - - :worker_1_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - -\c - - - :worker_2_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; --- END : Display current state - --- BEGIN: Should be able to change/drop constraints -\c - postgres - :master_port -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE test_shard_split_role_nodeferred_drop; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed; -ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200; -DROP STATISTICS stats_on_sensors; -DROP INDEX index_on_sensors_renamed; -ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist; --- END: Should be able to change/drop constraints - --- BEGIN: Split second time on another schema -SET search_path TO public; -SET citus.next_shard_id TO 8981031; -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981007, - ARRAY['-2100000000'], - ARRAY[:worker_1_node, :worker_2_node], - 'force_logical'); - -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE 
node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; --- END: Split second time on another schema - --- BEGIN: Create a co-located table with no replica identity. -CREATE TABLE table_no_rep_id (measureid integer); -SELECT create_distributed_table('table_no_rep_id', 'measureid', colocate_with:='sensors'); --- END: Create a co-located table with no replica identity. - --- BEGIN: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity -SET citus.next_shard_id TO 8981041; -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981031, - ARRAY['-2120000000'], - ARRAY[:worker_1_node, :worker_2_node]); - -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; --- END: Split a shard with shard_transfer_mode='auto' and with a colocated table with no replica identity - --- BEGIN: Drop the co-located table with no replica identity. -DROP TABLE table_no_rep_id; --- END: Drop the co-located table with no replica identity. - --- BEGIN: Split a shard with shard_transfer_mode='auto' and with all colocated tables has replica identity -SET citus.next_shard_id TO 8981041; -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981031, - ARRAY['-2120000000'], - ARRAY[:worker_1_node, :worker_2_node], - 'auto'); - -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; --- END: Split a shard with shard_transfer_mode='auto' and with all colocated tables has replica identity - --- BEGIN: Validate Data Count -SELECT COUNT(*) FROM sensors; -SELECT COUNT(*) FROM reference_table; -SELECT COUNT(*) FROM colocated_dist_table; --- END: Validate Data Count - ---BEGIN : Cleanup -\c - postgres - :master_port -DROP SCHEMA "citus_split_test_schema_no_deferred_drop" CASCADE; - -SET citus.defer_drop_after_shard_split TO ON; ---END : Cleanup diff --git a/src/test/regress/sql/citus_split_shard_by_split_points.sql b/src/test/regress/sql/citus_split_shard_by_split_points.sql index e9e568de3..24bf42951 100644 --- a/src/test/regress/sql/citus_split_shard_by_split_points.sql +++ b/src/test/regress/sql/citus_split_shard_by_split_points.sql @@ -125,10 +125,10 @@ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, node SET ROLE test_split_role; SET search_path TO "citus_split_test_schema"; SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; SELECT 
citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); -- END : Move one shard before we split it. +CALL citus_cleanup_orphaned_shards(); -- BEGIN : Set node id variables SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset @@ -163,6 +163,8 @@ CALL pg_catalog.citus_cleanup_orphaned_resources(); SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); -- END : Move a shard post split. +CALL citus_cleanup_orphaned_shards(); + -- BEGIN : Display current state. SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport FROM pg_dist_shard AS shard diff --git a/src/test/regress/sql/citus_split_shard_no_deferred_drop.sql b/src/test/regress/sql/citus_split_shard_no_deferred_drop.sql deleted file mode 100644 index 76d8b6a45..000000000 --- a/src/test/regress/sql/citus_split_shard_no_deferred_drop.sql +++ /dev/null @@ -1,246 +0,0 @@ -/* -This suite runs without deferred drop enabled. -Citus Shard Split Test.The test is model similar to 'shard_move_constraints'. -Here is a high level overview of test plan: - 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table. - 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors. - 3. Create Foreign key constraints between the two co-located distributed tables. - 4. Load data into the three tables. - 5. Move one of the shards for 'sensors' to test ShardMove -> Split. - 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables. - 7. Move one of the split shard to test Split -> ShardMove. - 8. Split an already split shard second time on a different schema. -*/ - -CREATE SCHEMA "citus_split_test_schema_no_deferred_drop"; - -SET citus.defer_drop_after_shard_split TO OFF; -CREATE ROLE test_split_deferred_role WITH LOGIN; -GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema_no_deferred_drop" TO test_split_deferred_role; -SET ROLE test_split_deferred_role; - -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SET citus.next_shard_id TO 8981000; -SET citus.next_placement_id TO 8610000; -SET citus.shard_count TO 2; -SET citus.shard_replication_factor TO 1; - --- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc. -CREATE TABLE sensors( - measureid integer, - eventdatetime date, - measure_data jsonb, - meaure_quantity decimal(15, 2), - measure_status char(1), - measure_comment varchar(44), - PRIMARY KEY (measureid, eventdatetime, measure_data)); - -CREATE INDEX index_on_sensors ON sensors(lower(measureid::text)); -ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000; -CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed')); -CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status); -CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors; - -SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); --- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. - --- BEGIN: Create co-located distributed and reference tables. 
-CREATE TABLE reference_table (measureid integer PRIMARY KEY); -SELECT create_reference_table('reference_table'); - -CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); -CLUSTER colocated_dist_table USING colocated_dist_table_pkey; -SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); - -CREATE TABLE table_with_index_rep_identity(key int NOT NULL); -CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key); -ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx; -CLUSTER table_with_index_rep_identity USING uqx; -SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors'); --- END: Create co-located distributed and reference tables. - --- BEGIN : Create Foreign key constraints. -ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); --- END : Create Foreign key constraints. - --- BEGIN : Load data into tables. -INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; -INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; -INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i; - -SELECT COUNT(*) FROM sensors; -SELECT COUNT(*) FROM reference_table; -SELECT COUNT(*) FROM colocated_dist_table; --- END: Load data into tables. - --- BEGIN : Display current state. -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - -\c - - - :worker_1_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - -\c - - - :worker_2_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; --- END : Display current state - --- 
BEGIN : Move one shard before we split it. -\c - postgres - :master_port -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE test_split_deferred_role; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SET citus.next_shard_id TO 8981007; -SET citus.defer_drop_after_shard_move TO OFF; - -SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); --- END : Move one shard before we split it. - --- BEGIN : Set node id variables -SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset -SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset --- END : Set node id variables - --- BEGIN : Split two shards : One with move and One without move. --- Perform 2 way split -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981000, - ARRAY['-1073741824'], - ARRAY[:worker_1_node, :worker_2_node], - 'block_writes'); - --- Perform 3 way split -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981001, - ARRAY['536870911', '1610612735'], - ARRAY[:worker_1_node, :worker_1_node, :worker_2_node], - 'block_writes'); --- END : Split two shards : One with move and One without move. - --- BEGIN : Move a shard post split. -SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); --- END : Move a shard post split. - --- BEGIN : Display current state. -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; - -\c - - - :worker_1_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY stxname ASC; - -\c - - - :worker_2_port - SET search_path TO "citus_split_test_schema_no_deferred_drop", public, pg_catalog; - SET citus.show_shards_for_app_name_prefixes = '*'; - SELECT tbl.relname, fk."Constraint", fk."Definition" - FROM pg_catalog.pg_class tbl - JOIN public.table_fkeys fk on tbl.oid = fk.relid - WHERE tbl.relname like 'sensors_%' - ORDER BY 1, 2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; - SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; - SELECT stxname FROM pg_statistic_ext - WHERE stxnamespace IN ( - SELECT oid - FROM pg_namespace - WHERE nspname IN ('citus_split_test_schema_no_deferred_drop') - ) - ORDER BY 
stxname ASC; --- END : Display current state - --- BEGIN: Should be able to change/drop constraints -\c - postgres - :master_port -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE test_split_deferred_role; -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed; -ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200; -DROP STATISTICS stats_on_sensors; -DROP INDEX index_on_sensors_renamed; -ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist; --- END: Should be able to change/drop constraints - --- BEGIN: Split second time on another schema -SET search_path TO public; -SET citus.next_shard_id TO 8981031; -SELECT pg_catalog.citus_split_shard_by_split_points( - 8981007, - ARRAY['-2100000000'], - ARRAY[:worker_1_node, :worker_2_node], - 'block_writes'); - -SET search_path TO "citus_split_test_schema_no_deferred_drop"; -SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport - FROM pg_dist_shard AS shard - INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid - INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid - WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) - ORDER BY logicalrelid, shardminvalue::BIGINT; --- END: Split second time on another schema - --- BEGIN: Validate Data Count -SELECT COUNT(*) FROM sensors; -SELECT COUNT(*) FROM reference_table; -SELECT COUNT(*) FROM colocated_dist_table; --- END: Validate Data Count - ---BEGIN : Cleanup -\c - postgres - :master_port -DROP SCHEMA "citus_split_test_schema_no_deferred_drop" CASCADE; - -SET citus.defer_drop_after_shard_split TO ON; ---END : Cleanup diff --git a/src/test/regress/sql/failure_tenant_isolation.sql b/src/test/regress/sql/failure_tenant_isolation.sql index 874b3a59e..227e61969 100644 --- a/src/test/regress/sql/failure_tenant_isolation.sql +++ b/src/test/regress/sql/failure_tenant_isolation.sql @@ -86,29 +86,6 @@ SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); - --- failure on dropping old colocated shard --- Disable deferred drop otherwise we will skip the drop and operation will succeed instead of failing. 
-SET citus.defer_drop_after_shard_split TO OFF; - -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()'); -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); - --- cancellation on dropping old colocated shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')'); -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); - --- failure on dropping old shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()'); -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); - --- cancellation on dropping old shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')'); -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); - --- Re-enable deferred drop for rest of the tests. -SET citus.defer_drop_after_shard_split TO ON; - -- failure on foreign key creation SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()'); SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); diff --git a/src/test/regress/sql/failure_tenant_isolation_nonblocking.sql b/src/test/regress/sql/failure_tenant_isolation_nonblocking.sql index 2ee928a56..8b66d11a1 100644 --- a/src/test/regress/sql/failure_tenant_isolation_nonblocking.sql +++ b/src/test/regress/sql/failure_tenant_isolation_nonblocking.sql @@ -149,29 +149,6 @@ SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode SELECT citus.mitmproxy('conn.onQuery(query="select pg_drop_replication_slot").cancel(' || :pid || ')'); SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); --- failure on dropping old shard --- failure on dropping old colocated shard --- Disable deferred drop otherwise we will skip the drop and operation will succeed instead of failing. -SET citus.defer_drop_after_shard_split TO OFF; - -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()'); -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); - --- cancellation on dropping old shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')'); -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); - --- failure on dropping old colocated shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()'); -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); - --- cancellation on dropping old colocated shard -SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')'); -SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); - --- Re-enable deferred drop for rest of the tests. 
-SET citus.defer_drop_after_shard_split TO ON; - -- failure on foreign key creation SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()'); SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index e84d51ba7..4f4bcd8c3 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -819,7 +819,6 @@ CREATE EXTENSION citus; CREATE TABLE test (x int, y int); INSERT INTO test VALUES (1,2); SET citus.shard_replication_factor TO 1; -SET citus.defer_drop_after_shard_split TO off; SELECT create_distributed_table_concurrently('test','x'); DROP TABLE test; TRUNCATE pg_dist_node; diff --git a/src/test/regress/sql/multi_tenant_isolation.sql b/src/test/regress/sql/multi_tenant_isolation.sql index 07a88cf98..bb505f971 100644 --- a/src/test/regress/sql/multi_tenant_isolation.sql +++ b/src/test/regress/sql/multi_tenant_isolation.sql @@ -399,29 +399,6 @@ RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER abort_drop ON sql_drop EXECUTE PROCEDURE abort_drop_command(); -\c - postgres - :master_port --- Disable deferred drop otherwise we will skip the drop and operation will succeed instead of failing. -SET citus.defer_drop_after_shard_split TO OFF; -SET ROLE mx_isolation_role_ent; -SET search_path to "Tenant Isolation"; - -\set VERBOSITY terse -SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE', shard_transfer_mode => 'block_writes'); - -\set VERBOSITY default - --- check if metadata is changed -SELECT * FROM pg_dist_shard - WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass - ORDER BY shardminvalue::BIGINT, logicalrelid; - -\c - - - :worker_1_port -SET search_path to "Tenant Isolation"; - --- however, new tables are already created -SET citus.override_table_visibility TO false; -\d - \c - postgres - :worker_1_port DROP EVENT TRIGGER abort_drop; diff --git a/src/test/regress/sql/multi_transaction_recovery.sql b/src/test/regress/sql/multi_transaction_recovery.sql index 5b5afb2e2..333807267 100644 --- a/src/test/regress/sql/multi_transaction_recovery.sql +++ b/src/test/regress/sql/multi_transaction_recovery.sql @@ -200,7 +200,6 @@ LIMIT 1; SELECT COUNT(*) FROM pg_dist_transaction; BEGIN; -SET LOCAL citus.defer_drop_after_shard_move TO OFF; SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode := 'block_writes'); COMMIT; SELECT COUNT(*) FROM pg_dist_transaction; diff --git a/src/test/regress/sql/shard_move_deferred_delete.sql b/src/test/regress/sql/shard_move_deferred_delete.sql index 0b5d1a872..aa1578f10 100644 --- a/src/test/regress/sql/shard_move_deferred_delete.sql +++ b/src/test/regress/sql/shard_move_deferred_delete.sql @@ -6,7 +6,6 @@ SET citus.next_shard_id TO 20000000; SET citus.shard_count TO 6; SET citus.shard_replication_factor TO 1; -SET citus.defer_drop_after_shard_move TO on; CREATE SCHEMA shard_move_deferred_delete; SET search_path TO shard_move_deferred_delete;