Enable citus.defer_drop_after_shard_move by default (#4961)

Enable citus.defer_drop_after_shard_move by default
pull/4997/head
SaitTalhaNisanci 2021-05-21 10:48:32 +03:00 committed by GitHub
parent d7dd247fb5
commit 82f34a8d88
25 changed files with 539 additions and 144 deletions
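This change flips citus.defer_drop_after_shard_move to on, so master_move_shard_placement() and the shard rebalancer leave the old placement behind, marked for deferred deletion, instead of dropping it inside the move itself. A quick sketch of checking and overriding the new defaults from psql; the GUC names come from the diff below, the exact SHOW output formatting is only illustrative:

-- Check the defaults introduced by this commit.
SHOW citus.defer_drop_after_shard_move;    -- expected: on
SHOW citus.defer_shard_delete_interval;    -- expected: 15s

-- Opt back into the old, immediate-drop behaviour for the current session.
SET citus.defer_drop_after_shard_move TO off;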

View File

@ -1061,11 +1061,24 @@ EnsureShardCanBeCopied(int64 shardId, const char *sourceNodeName, int32 sourceNo
					targetNodeName,
					targetNodePort);
	if (targetPlacement != NULL)
	{
		if (targetPlacement->shardState == SHARD_STATE_TO_DELETE)
		{
			ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
							errmsg(
								"shard " INT64_FORMAT " already exists in the target node",
								shardId),
							errdetail(
								"The existing shard is marked for deletion, but could not be deleted because there are still active queries on it")));
		}
		else
		{
			ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
							errmsg(
								"shard " INT64_FORMAT " already exists in the target node",
								shardId)));
		}
	}
}
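The new branch above distinguishes a target placement that is merely waiting to be dropped (SHARD_STATE_TO_DELETE, stored as shardstate 4 in pg_dist_placement, which is what the test changes below filter on) from a genuinely conflicting placement. A hedged sketch of how such leftover placements could be inspected from the coordinator; the query itself is illustrative and not part of this commit:

-- Placements that a previous move left behind and that still wait to be dropped.
-- shardstate 4 corresponds to SHARD_STATE_TO_DELETE.
SELECT placementid, shardid, groupid
FROM pg_dist_placement
WHERE shardstate = 4;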

View File

@ -640,7 +640,7 @@ RegisterCitusConfigVariables(void)
"citus.defer_shard_delete_interval to make sure defered deletions " "citus.defer_shard_delete_interval to make sure defered deletions "
"will be executed"), "will be executed"),
&DeferShardDeleteOnMove, &DeferShardDeleteOnMove,
false, true,
PGC_USERSET, PGC_USERSET,
0, 0,
NULL, NULL, NULL); NULL, NULL, NULL);
@ -655,7 +655,7 @@ RegisterCitusConfigVariables(void)
"the background worker moves on. When set to -1 this background " "the background worker moves on. When set to -1 this background "
"process is skipped."), "process is skipped."),
&DeferShardDeleteInterval, &DeferShardDeleteInterval,
-1, -1, 7 * 24 * 3600 * 1000, 15000, -1, 7 * 24 * 3600 * 1000,
PGC_SIGHUP, PGC_SIGHUP,
GUC_UNIT_MS, GUC_UNIT_MS,
NULL, NULL, NULL); NULL, NULL, NULL);

View File

@ -93,7 +93,7 @@ typedef struct MaintenanceDaemonDBData
/* config variable for distributed deadlock detection timeout */
double DistributedDeadlockDetectionTimeoutFactor = 2.0;
int Recover2PCInterval = 60000;
-int DeferShardDeleteInterval = 60000;
+int DeferShardDeleteInterval = 15000;
/* config variables for metadata sync timeout */
int MetadataSyncInterval = 60000;
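With the built-in default lowered from 60000 ms to 15000 ms, the maintenance daemon now reaps orphaned placements roughly every 15 seconds. Since the GUC is registered as PGC_SIGHUP, it can be changed cluster-wide without a restart; a small sketch assuming superuser access (the chosen value is only an example):

-- Reap deferred-drop placements less aggressively than the new 15s default.
ALTER SYSTEM SET citus.defer_shard_delete_interval TO '60s';
SELECT pg_reload_conf();   -- PGC_SIGHUP: a reload is enough, no restart needed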

View File

@ -61,6 +61,12 @@ SELECT count(*) FROM referencing_table2;
101
(1 row)
SELECT 1 FROM public.master_defer_delete_shards();
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_rebalance.%' AND refd_relid LIKE 'fkey_to_reference_shard_rebalance.%' ORDER BY 1,2,3;
name | relid | refd_relid
---------------------------------------------------------------------
@ -102,6 +108,12 @@ SELECT count(*) FROM referencing_table2;
101
(1 row)
SELECT 1 FROM public.master_defer_delete_shards();
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_rebalance.%' AND refd_relid LIKE 'fkey_to_reference_shard_rebalance.%' ORDER BY 1,2,3;
name | relid | refd_relid
---------------------------------------------------------------------
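The tests above call public.master_defer_delete_shards() right after the rebalance. Going by how it is used throughout this diff, it drops placements that were marked for deferred deletion and returns how many it removed (here wrapped in SELECT 1 FROM ... so the expected output stays stable). A hedged sketch of the intended workflow; the shard id is made up for illustration, and in these tests the function may be a thin wrapper installed in the public schema:

-- Move a shard; with the new default the old placement is only marked for deletion.
SELECT master_move_shard_placement(102008, 'localhost', 57637, 'localhost', 57638,
                                   shard_transfer_mode := 'block_writes');
-- Force the deferred drop instead of waiting for the maintenance daemon;
-- the result is the number of orphaned placements that were removed.
SELECT public.master_defer_delete_shards();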

View File

@ -31,7 +31,7 @@ x y
15 15
172 172
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -69,7 +69,7 @@ x y
15 16
172 173
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -109,7 +109,7 @@ x y
15 16
172 173
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -147,7 +147,7 @@ step s1-select:
x y
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -172,19 +172,18 @@ x y
172 172
step s1-move-placement:
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes') FROM selected_shard;
- <waiting ...>
- step s2-end:
- COMMIT;
- step s1-move-placement: <... completed>
master_move_shard_placement
+ step s2-end:
+ COMMIT;
step s1-end:
COMMIT;
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -225,7 +224,7 @@ x y
5 5
15 30
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -263,7 +262,7 @@ step s1-select:
x y
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -298,7 +297,7 @@ step s1-select:
x y z
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
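The reordered isolation output above is the user-visible effect of deferring the drop: a shard move no longer waits for concurrent readers of the old placement to finish, because the old shard is merely marked for deletion and dropped later. A plain-SQL sketch of the two-session scenario the spec drives; t1 and selected_shard are names used in these tests, the rest is illustrative:

-- Session 2 keeps a read open on the shard being moved:
BEGIN;
SELECT count(*) FROM t1;

-- Session 1 moves the shard; with defer_drop_after_shard_move = on the call
-- returns immediately instead of blocking, since the old placement is only
-- marked as shardstate 4 rather than dropped here:
SELECT master_move_shard_placement((SELECT * FROM selected_shard),
                                   'localhost', 57637, 'localhost', 57638,
                                   shard_transfer_mode := 'block_writes');

-- Session 2 finishes later; the orphaned placement is cleaned up afterwards.
COMMIT;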

View File

@ -46,7 +46,7 @@ x y
15 15
172 172
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -110,7 +110,7 @@ x y
15 16
172 173
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -172,7 +172,7 @@ step s1-select:
x y
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -214,22 +214,21 @@ run_commands_on_session_level_connection_to_node
step s1-move-placement:
SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes') FROM selected_shard;
- <waiting ...>
+ master_move_shard_placement
step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
- step s1-move-placement: <... completed>
- master_move_shard_placement
step s1-commit:
COMMIT;
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport

View File

@ -30,7 +30,7 @@ x y
15 15
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -67,7 +67,7 @@ x y
15 16
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -106,7 +106,7 @@ x y
15 16
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -144,7 +144,7 @@ step s1-select:
x y
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -168,19 +168,18 @@ x y
15 15
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes');
- <waiting ...>
- step s2-end:
- COMMIT;
- step s1-move-placement: <... completed>
master_move_shard_placement
+ step s2-end:
+ COMMIT;
step s1-end:
COMMIT;
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -204,19 +203,18 @@ x y
15 15
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes');
- <waiting ...>
- step s2-end:
- COMMIT;
- step s1-move-placement: <... completed>
master_move_shard_placement
+ step s2-end:
+ COMMIT;
step s1-end:
COMMIT;
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport

View File

@ -45,7 +45,7 @@ x y
15 15
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -108,7 +108,7 @@ x y
15 16
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -170,7 +170,7 @@ step s1-select:
x y
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -212,22 +212,21 @@ run_commands_on_session_level_connection_to_node
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes');
- <waiting ...>
+ master_move_shard_placement
step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
- step s1-move-placement: <... completed>
- master_move_shard_placement
step s1-commit:
COMMIT;
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport
@ -269,22 +268,21 @@ run_commands_on_session_level_connection_to_node
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, shard_transfer_mode:='block_writes');
- <waiting ...>
+ master_move_shard_placement
step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
- step s1-move-placement: <... completed>
- master_move_shard_placement
step s1-commit:
COMMIT;
step s1-get-shard-distribution:
- select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
+ select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
nodeport

View File

@ -5,7 +5,6 @@ step s1-begin:
BEGIN;
step s1-move-placement:
- SET citus.defer_drop_after_shard_move TO ON;
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
@ -34,7 +33,6 @@ step s1-begin:
BEGIN;
step s1-move-placement:
- SET citus.defer_drop_after_shard_move TO ON;
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
@ -62,7 +60,6 @@ step s1-begin:
BEGIN;
step s1-move-placement:
- SET citus.defer_drop_after_shard_move TO ON;
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
@ -119,3 +116,31 @@ run_try_drop_marked_shards
step s1-commit:
COMMIT;
starting permutation: s1-begin s2-begin s2-select s1-move-placement-without-deferred s2-commit s1-commit
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-select:
SELECT COUNT(*) FROM t1;
count
0
step s1-move-placement-without-deferred:
SET citus.defer_drop_after_shard_move TO OFF;
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-move-placement-without-deferred: <... completed>
master_move_shard_placement
step s1-commit:
COMMIT;

View File

@ -65,8 +65,8 @@ step s3-progress:
table_name shardid shard_size sourcename sourceport source_shard_size targetname targetport target_shard_size progress
- colocated1 1500001 49152 localhost 57637 0 localhost 57638 49152 2
+ colocated1 1500001 49152 localhost 57637 49152 localhost 57638 49152 2
- colocated2 1500005 376832 localhost 57637 0 localhost 57638 376832 2
+ colocated2 1500005 376832 localhost 57637 376832 localhost 57638 376832 2
colocated1 1500002 196608 localhost 57637 196608 localhost 57638 0 1
colocated2 1500006 8192 localhost 57637 8192 localhost 57638 0 1
step s2-unlock-2:

View File

@ -260,6 +260,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
shardid | logicalrelid | nodeport
---------------------------------------------------------------------
@ -303,6 +304,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
p.logicalrelid = 'table5_groupX'::regclass
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
shardid | logicalrelid | nodeport
---------------------------------------------------------------------
@ -331,7 +333,8 @@ FROM
WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
- p.logicalrelid = 'table5_groupX'::regclass
+ p.logicalrelid = 'table5_groupX'::regclass AND
sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
shardid | logicalrelid | nodeport
---------------------------------------------------------------------
@ -355,6 +358,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
p.logicalrelid = 'table6_append'::regclass
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
shardid | logicalrelid | nodeport
---------------------------------------------------------------------
@ -376,7 +380,8 @@ FROM
WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
- p.logicalrelid = 'table6_append'::regclass
+ p.logicalrelid = 'table6_append'::regclass AND
sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
shardid | logicalrelid | nodeport
---------------------------------------------------------------------
@ -386,8 +391,7 @@ ORDER BY s.shardid, sp.nodeport;
-- try to move shard from wrong node
SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
- ERROR: could not find placement matching "localhost:xxxxx"
- HINT: Confirm the placement still exists and try again.
+ ERROR: source placement must be in active state
-- test shard move with foreign constraints
DROP TABLE IF EXISTS table1_group1, table2_group1;
SET citus.shard_count TO 6;
@ -418,6 +422,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
shardid | logicalrelid | nodeport
---------------------------------------------------------------------
@ -449,6 +454,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
shardid | logicalrelid | nodeport
---------------------------------------------------------------------
@ -581,6 +587,7 @@ SELECT count(*) FROM move_partitions.events;
SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port
AND shardstate != 4
ORDER BY shardid LIMIT 1;
master_move_shard_placement
---------------------------------------------------------------------
@ -598,7 +605,7 @@ ALTER TABLE move_partitions.events_1 ADD CONSTRAINT e_1_pk PRIMARY KEY (id);
-- should be able to move automatically now
SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
- WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port
+ WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port AND shardstate != 4
ORDER BY shardid LIMIT 1;
master_move_shard_placement
---------------------------------------------------------------------
@ -614,7 +621,7 @@ SELECT count(*) FROM move_partitions.events;
-- should also be able to move with block writes
SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes')
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
- WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port
+ WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port AND shardstate != 4
ORDER BY shardid LIMIT 1;
master_move_shard_placement
---------------------------------------------------------------------

View File

@ -141,9 +141,10 @@ SELECT
FROM
pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE
- logicalrelid = 'mx_table_1'::regclass
+ (logicalrelid = 'mx_table_1'::regclass
OR logicalrelid = 'mx_table_2'::regclass
- OR logicalrelid = 'mx_table_3'::regclass
+ OR logicalrelid = 'mx_table_3'::regclass)
AND shardstate != 4
ORDER BY
logicalrelid, shardid;
logicalrelid | shardid | nodename | nodeport
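The added parentheses matter because AND binds more tightly than OR in SQL: without them the new shardstate != 4 filter would only apply to the mx_table_3 branch of the predicate. A tiny self-contained illustration of the precedence difference (the inline VALUES data is made up):

-- a OR b OR c AND d   means   a OR b OR (c AND d),
-- so the filter must be written as (a OR b OR c) AND d to apply to every branch.
SELECT relid
FROM (VALUES ('mx_table_1', 1), ('mx_table_2', 4)) AS t(relid, shardstate)
WHERE (relid = 'mx_table_1' OR relid = 'mx_table_2')
  AND shardstate != 4;   -- returns only mx_table_1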

View File

@ -133,6 +133,10 @@ $cmd$);
(localhost,57638,t,1)
(2 rows)
-- we expect to get an error since the old placement is still there
SELECT master_move_shard_placement(20000000, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
ERROR: shard xxxxx already exists in the target node
DETAIL: The existing shard is marked for deletion, but could not be deleted because there are still active queries on it
SELECT run_command_on_workers($cmd$
-- override the function for testing purpose
create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint)
@ -169,7 +173,6 @@ SELECT master_move_shard_placement(20000001, 'localhost', :worker_2_port, 'local
(1 row)
ROLLBACK;
- -- we expect shard xxxxx to be on both of the workers
SELECT run_command_on_workers($cmd$
SELECT count(*) FROM pg_class WHERE relname = 't1_20000000';
$cmd$);

View File

@ -31,12 +31,24 @@ SELECT rebalance_table_shards('dist_table_test');
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT rebalance_table_shards();
rebalance_table_shards
---------------------------------------------------------------------
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
-- test that calling rebalance_table_shards without specifying relation
-- wouldn't move shard of the citus local table.
CREATE TABLE citus_local_table(a int, b int);
@ -53,6 +65,12 @@ SELECT rebalance_table_shards();
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
-- show that citus local table shard is still on the coordinator
SELECT tablename FROM pg_catalog.pg_tables where tablename like 'citus_local_table_%';
tablename
@ -83,6 +101,12 @@ SELECT pg_sleep(.1); -- wait to make sure the config has changed before running
SELECT master_drain_node('localhost', :master_port);
ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: <system specific error>
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
ALTER SYSTEM RESET citus.local_hostname;
SELECT pg_reload_conf();
pg_reload_conf
@ -102,6 +126,12 @@ SELECT master_drain_node('localhost', :master_port);
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
-- show that citus local table shard is still on the coordinator
SELECT tablename FROM pg_catalog.pg_tables where tablename like 'citus_local_table_%';
tablename
@ -404,6 +434,7 @@ SELECT master_create_distributed_table('replication_test_table', 'int_column', '
CREATE VIEW replication_test_table_placements_per_node AS
SELECT count(*) FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard
WHERE logicalrelid = 'replication_test_table'::regclass
AND shardstate != 4
GROUP BY nodename, nodeport
ORDER BY nodename, nodeport;
-- Create four shards with replication factor 2, and delete the placements
@ -526,6 +557,7 @@ SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'ap
CREATE VIEW table_placements_per_node AS
SELECT nodeport, logicalrelid::regclass, count(*)
FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard
WHERE shardstate != 4
GROUP BY logicalrelid::regclass, nodename, nodeport
ORDER BY logicalrelid::regclass, nodename, nodeport;
-- Create six shards with replication factor 1 and move them to the same
@ -546,6 +578,7 @@ AS $$
pg_dist_shard_placement src USING (shardid),
(SELECT nodename, nodeport FROM pg_dist_shard_placement ORDER BY nodeport DESC LIMIT 1) dst
WHERE src.nodeport < dst.nodeport AND s.logicalrelid = rel::regclass;
SELECT public.master_defer_delete_shards();
$$;
CALL create_unbalanced_shards('rebalance_test_table');
SET citus.shard_replication_factor TO 2;
@ -590,6 +623,12 @@ FROM (
WHERE logicalrelid = 'rebalance_test_table'::regclass
) T;
ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: <system specific error>
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
ALTER SYSTEM RESET citus.local_hostname;
SELECT pg_reload_conf();
pg_reload_conf
@ -618,6 +657,12 @@ FROM (
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -669,6 +714,12 @@ SELECT * FROM table_placements_per_node;
57638 | rebalance_test_table | 5
(2 rows)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT rebalance_table_shards('rebalance_test_table',
threshold := 0, max_shard_moves := 1,
shard_transfer_mode:='block_writes');
@ -677,6 +728,12 @@ SELECT rebalance_table_shards('rebalance_test_table',
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -691,6 +748,12 @@ SELECT rebalance_table_shards('rebalance_test_table', threshold := 1, shard_tran
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT * FROM table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -705,6 +768,12 @@ SELECT rebalance_table_shards('rebalance_test_table', threshold := 0);
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -720,6 +789,12 @@ SELECT rebalance_table_shards('rebalance_test_table', threshold := 0, shard_tran
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT * FROM table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -893,6 +968,12 @@ SELECT COUNT(*) FROM imbalanced_table;
-- Try force_logical
SELECT rebalance_table_shards('imbalanced_table', threshold:=0, shard_transfer_mode:='force_logical');
ERROR: the force_logical transfer mode is currently unsupported
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
-- Test rebalance operation
SELECT rebalance_table_shards('imbalanced_table', threshold:=0, shard_transfer_mode:='block_writes');
rebalance_table_shards
@ -900,6 +981,12 @@ SELECT rebalance_table_shards('imbalanced_table', threshold:=0, shard_transfer_m
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
1
(1 row)
-- Confirm rebalance
-- Shard counts in each node after rebalance
SELECT * FROM public.table_placements_per_node;
@ -936,6 +1023,12 @@ FROM pg_dist_shard_placement
WHERE nodeport = :worker_2_port;
ERROR: Moving shards to a non-existing node is not supported
HINT: Add the target node via SELECT citus_add_node('localhost', 10000);
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
-- Try to move shards to a node where shards are not allowed
SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', false);
master_set_node_property
@ -979,6 +1072,12 @@ WHERE nodeport = :worker_2_port;
(2 rows)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
2
(1 row)
SELECT create_distributed_table('colocated_rebalance_test2', 'id');
create_distributed_table
---------------------------------------------------------------------
@ -1006,6 +1105,12 @@ SELECT * FROM rebalance_table_shards('colocated_rebalance_test', threshold := 0,
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
-- Confirm that nothing changed
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
@ -1047,6 +1152,12 @@ SELECT * FROM rebalance_table_shards('colocated_rebalance_test', threshold := 0,
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
4
(1 row)
-- Check that we can call this function without a crash
SELECT * FROM get_rebalance_progress();
sessionid | table_name | shardid | shard_size | sourcename | sourceport | targetname | targetport | progress | source_shard_size | target_shard_size
@ -1104,6 +1215,12 @@ SELECT * FROM rebalance_table_shards('colocated_rebalance_test', threshold := 0,
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
4
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1126,6 +1243,12 @@ SELECT * FROM rebalance_table_shards('non_colocated_rebalance_test', threshold :
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
2
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1147,6 +1270,12 @@ SELECT * FROM rebalance_table_shards('colocated_rebalance_test', threshold := 0,
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
4
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1163,6 +1292,12 @@ SELECT * FROM rebalance_table_shards('non_colocated_rebalance_test', threshold :
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
2
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1199,6 +1334,12 @@ SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'blo
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
6
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1220,6 +1361,12 @@ SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'blo
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
6
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1256,6 +1403,12 @@ SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'blo
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
6
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1277,6 +1430,12 @@ SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'blo
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
6
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1302,6 +1461,12 @@ SELECT * from master_drain_node('localhost', :worker_2_port, shard_transfer_mode
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
6
(1 row)
select shouldhaveshards from pg_dist_node where nodeport = :worker_2_port;
shouldhaveshards
---------------------------------------------------------------------
@ -1329,6 +1494,12 @@ SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'blo
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
6
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1435,6 +1606,12 @@ SELECT * FROM rebalance_table_shards('tab', shard_transfer_mode:='block_writes')
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1449,6 +1626,12 @@ NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1464,6 +1647,12 @@ DETAIL: Using threshold of 0.01
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1569,6 +1758,12 @@ NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
4
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1663,6 +1858,12 @@ NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
3
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1686,6 +1887,12 @@ SELECT * FROM rebalance_table_shards('tab', shard_transfer_mode:='block_writes')
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1735,6 +1942,12 @@ NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
4
(1 row)
SELECT * FROM public.table_placements_per_node;
nodeport | logicalrelid | count
---------------------------------------------------------------------
@ -1759,8 +1972,20 @@ SELECT * FROM get_rebalance_table_shards_plan('tab', rebalance_strategy := 'non_
ERROR: could not find rebalance strategy with name non_existing
SELECT * FROM rebalance_table_shards('tab', rebalance_strategy := 'non_existing');
ERROR: could not find rebalance strategy with name non_existing
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT * FROM master_drain_node('localhost', :worker_2_port, rebalance_strategy := 'non_existing');
ERROR: could not find rebalance strategy with name non_existing
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT citus_set_default_rebalance_strategy('non_existing');
ERROR: strategy with specified name does not exist
UPDATE pg_dist_rebalance_strategy SET default_strategy=false;
@ -1768,8 +1993,20 @@ SELECT * FROM get_rebalance_table_shards_plan('tab');
ERROR: no rebalance_strategy was provided, but there is also no default strategy set
SELECT * FROM rebalance_table_shards('tab');
ERROR: no rebalance_strategy was provided, but there is also no default strategy set
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
SELECT * FROM master_drain_node('localhost', :worker_2_port);
ERROR: no rebalance_strategy was provided, but there is also no default strategy set
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
UPDATE pg_dist_rebalance_strategy SET default_strategy=true WHERE name='by_shard_count';
CREATE OR REPLACE FUNCTION shard_cost_no_arguments()
RETURNS real AS $$ SELECT 1.0::real $$ LANGUAGE sql;
@@ -2041,6 +2278,12 @@ SELECT rebalance_table_shards('rebalance_test_table', shard_transfer_mode:='bloc
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
3
(1 row)
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
 count
---------------------------------------------------------------------
@@ -2102,6 +2345,12 @@ SELECT rebalance_table_shards();
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
2
(1 row)
DROP TABLE t1, r1, r2;
-- verify there are no distributed tables before we perform the following tests. Preceding
-- test suites should clean up their distributed tables.
@@ -2150,6 +2399,12 @@ SELECT rebalance_table_shards();
(1 row)
SELECT public.master_defer_delete_shards();
master_defer_delete_shards
---------------------------------------------------------------------
0
(1 row)
-- verify the reference table is on all nodes after the rebalance
SELECT count(*)
FROM pg_dist_shard
View File
@@ -442,6 +442,7 @@ push(@pgOptions, "wal_retrieve_retry_interval=1000");
push(@pgOptions, "citus.shard_count=4");
push(@pgOptions, "citus.max_adaptive_executor_pool_size=4");
push(@pgOptions, "citus.shard_max_size=1500kB");
push(@pgOptions, "citus.defer_shard_delete_interval=-1");
push(@pgOptions, "citus.repartition_join_bucket_count_per_node=2");
push(@pgOptions, "citus.sort_returning='on'");
push(@pgOptions, "citus.shard_replication_factor=2");
View File
@@ -8,6 +8,7 @@ setup
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
SELECT create_distributed_table('logical_replicate_placement', 'x');
@@ -53,7 +54,7 @@ step "s1-insert"
step "s1-get-shard-distribution"
{
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
}
session "s2"
View File
@@ -34,6 +34,7 @@ setup
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
@@ -80,7 +81,7 @@ step "s1-insert"
step "s1-get-shard-distribution"
{
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
}
session "s2"
View File
@@ -7,6 +7,7 @@ setup
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
SELECT create_distributed_table('logical_replicate_placement', 'x');
@@ -51,7 +52,7 @@ step "s1-insert"
step "s1-get-shard-distribution"
{
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
}
session "s2"
View File
@@ -33,6 +33,7 @@ setup
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 8;
CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int);
SELECT create_distributed_table('logical_replicate_placement', 'x');
@@ -78,7 +79,7 @@ step "s1-insert"
step "s1-get-shard-distribution"
{
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardid in (SELECT * FROM selected_shard) order by nodeport;
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 and shardid in (SELECT * FROM selected_shard) order by nodeport;
}
session "s2"
View File
@@ -36,7 +36,6 @@ COMMENT ON FUNCTION master_defer_delete_shards()
SET citus.next_shard_id to 120000;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
SET citus.defer_drop_after_shard_move TO ON;
CREATE TABLE t1 (x int PRIMARY KEY, y int);
SELECT create_distributed_table('t1', 'x');
@@ -61,10 +60,15 @@ step "s1-begin"
step "s1-move-placement"
{
SET citus.defer_drop_after_shard_move TO ON;
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
}
step "s1-move-placement-without-deferred" {
SET citus.defer_drop_after_shard_move TO OFF;
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
}
step "s1-drop-marked-shards" step "s1-drop-marked-shards"
{ {
SELECT public.master_defer_delete_shards(); SELECT public.master_defer_delete_shards();
@ -81,6 +85,10 @@ step "s1-commit"
session "s2" session "s2"
step "s2-begin" {
BEGIN;
}
step "s2-drop-old-shards" { step "s2-drop-old-shards" {
SELECT run_try_drop_marked_shards(); SELECT run_try_drop_marked_shards();
} }
@ -101,15 +109,24 @@ step "s2-lock-table-on-worker"
SELECT run_commands_on_session_level_connection_to_node('LOCK TABLE t1_120000'); SELECT run_commands_on_session_level_connection_to_node('LOCK TABLE t1_120000');
} }
step "s2-select" {
SELECT COUNT(*) FROM t1;
}
step "s2-drop-marked-shards" step "s2-drop-marked-shards"
{ {
SET client_min_messages to DEBUG1; SET client_min_messages to DEBUG1;
SELECT public.master_defer_delete_shards(); SELECT public.master_defer_delete_shards();
} }
step "s2-commit" {
COMMIT;
}
permutation "s1-begin" "s1-move-placement" "s1-drop-marked-shards" "s2-drop-marked-shards" "s1-commit" permutation "s1-begin" "s1-move-placement" "s1-drop-marked-shards" "s2-drop-marked-shards" "s1-commit"
permutation "s1-begin" "s1-move-placement" "s2-drop-marked-shards" "s1-drop-marked-shards" "s1-commit" permutation "s1-begin" "s1-move-placement" "s2-drop-marked-shards" "s1-drop-marked-shards" "s1-commit"
permutation "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-lock-table-on-worker" "s1-drop-marked-shards" "s1-commit" "s2-stop-connection" permutation "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-lock-table-on-worker" "s1-drop-marked-shards" "s1-commit" "s2-stop-connection"
// make sure we error if we cannot get the lock on pg_dist_placement // make sure we error if we cannot get the lock on pg_dist_placement
permutation "s1-begin" "s1-lock-pg-dist-placement" "s2-drop-old-shards" "s1-commit" permutation "s1-begin" "s1-lock-pg-dist-placement" "s2-drop-old-shards" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-select" "s1-move-placement-without-deferred" "s2-commit" "s1-commit"
View File
@@ -8,6 +8,7 @@ SET search_path to fkey_to_reference_shard_rebalance;
SET citus.shard_replication_factor TO 1;
SET citus.shard_count to 8;
CREATE TYPE foreign_details AS (name text, relid text, refd_relid text);
CREATE VIEW table_fkeys_in_workers AS
@@ -44,12 +45,14 @@ SELECT master_move_shard_placement(15000009, 'localhost', :worker_1_port, 'local
SELECT count(*) FROM referencing_table2;
SELECT 1 FROM public.master_defer_delete_shards();
SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_rebalance.%' AND refd_relid LIKE 'fkey_to_reference_shard_rebalance.%' ORDER BY 1,2,3;
SELECT master_move_shard_placement(15000009, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes');
SELECT count(*) FROM referencing_table2;
SELECT 1 FROM public.master_defer_delete_shards();
SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_to_reference_shard_rebalance.%' AND refd_relid LIKE 'fkey_to_reference_shard_rebalance.%' ORDER BY 1,2,3;
-- create a function to show the
View File
@@ -7,6 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13000000;
SET citus.shard_count TO 6;
SET citus.shard_replication_factor TO 1;
-- create distributed tables
CREATE TABLE table1_group1 ( id int PRIMARY KEY);
SELECT create_distributed_table('table1_group1', 'id', 'hash');
@@ -59,6 +60,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table1_
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table2_group1_13000006'::regclass;
\c - - - :master_port
-- copy colocated shards again to see error message
SELECT master_copy_shard_placement(13000000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false, 'force_logical');
@@ -140,6 +142,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
-- also connect worker to verify we successfully moved given shard (and other colocated shards)
@@ -149,6 +152,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table2_
\c - - - :master_port
-- test moving NOT colocated shard
-- status before shard move
SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport
@@ -158,6 +162,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
p.logicalrelid = 'table5_groupX'::regclass
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
-- move NOT colocated shard
@@ -170,7 +175,8 @@ FROM
WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
p.logicalrelid = 'table5_groupX'::regclass
p.logicalrelid = 'table5_groupX'::regclass AND
sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
@@ -183,6 +189,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
p.logicalrelid = 'table6_append'::regclass
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
-- move shard in append distributed table
@@ -195,7 +202,8 @@ FROM
WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
p.logicalrelid = 'table6_append'::regclass
p.logicalrelid = 'table6_append'::regclass AND
sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
@@ -228,6 +236,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
SELECT master_move_shard_placement(13000022, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'block_writes');
@@ -240,6 +249,7 @@ WHERE
p.logicalrelid = s.logicalrelid AND
s.shardid = sp.shardid AND
colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
AND sp.shardstate != 4
ORDER BY s.shardid, sp.nodeport;
-- also connect worker to verify we successfully moved given shard (and other colocated shards)
@@ -254,6 +264,7 @@ SELECT "Constraint", "Definition" FROM table_fkeys
\c - - - :master_port
-- test shard copy with foreign constraints
-- we expect it to error out because we do not support foreign constraints with replication factor > 1
SELECT master_copy_shard_placement(13000022, 'localhost', :worker_2_port, 'localhost', :worker_1_port, false);
@@ -305,6 +316,7 @@ SELECT count(*) FROM move_partitions.events;
SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port
AND shardstate != 4
ORDER BY shardid LIMIT 1;
SELECT count(*) FROM move_partitions.events;
@@ -315,7 +327,7 @@ ALTER TABLE move_partitions.events_1 ADD CONSTRAINT e_1_pk PRIMARY KEY (id);
-- should be able to move automatically now
SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port
WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port AND shardstate != 4
ORDER BY shardid LIMIT 1;
SELECT count(*) FROM move_partitions.events;
@@ -323,7 +335,7 @@ SELECT count(*) FROM move_partitions.events;
-- should also be able to move with block writes
SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes')
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port
WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port AND shardstate != 4
ORDER BY shardid LIMIT 1;
SELECT count(*) FROM move_partitions.events;
View File
@@ -87,9 +87,10 @@ SELECT
FROM
pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE
logicalrelid = 'mx_table_1'::regclass
(logicalrelid = 'mx_table_1'::regclass
OR logicalrelid = 'mx_table_2'::regclass
OR logicalrelid = 'mx_table_3'::regclass
OR logicalrelid = 'mx_table_3'::regclass)
AND shardstate != 4
ORDER BY
logicalrelid, shardid;
View File
@@ -70,6 +70,9 @@ SELECT run_command_on_workers($cmd$
SELECT count(*) FROM pg_class WHERE relname = 't1_20000000';
$cmd$);
-- we expect to get an error since the old placement is still there
SELECT master_move_shard_placement(20000000, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
SELECT run_command_on_workers($cmd$
-- override the function for testing purpose
@@ -95,8 +98,6 @@ set citus.check_available_space_before_move to false;
SELECT master_move_shard_placement(20000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
ROLLBACK;
-- we expect shard 0 to be on both of the workers
SELECT run_command_on_workers($cmd$
SELECT count(*) FROM pg_class WHERE relname = 't1_20000000';
$cmd$);
View File
@@ -13,7 +13,10 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupId=>0);
-- should just be noops even if we add the coordinator to the pg_dist_node
SELECT rebalance_table_shards('dist_table_test');
SELECT public.master_defer_delete_shards();
SELECT rebalance_table_shards();
SELECT public.master_defer_delete_shards();
-- test that calling rebalance_table_shards without specifying relation
-- wouldn't move shard of the citus local table.
@@ -22,6 +25,7 @@ SELECT citus_add_local_table_to_metadata('citus_local_table');
INSERT INTO citus_local_table VALUES (1, 2);
SELECT rebalance_table_shards();
SELECT public.master_defer_delete_shards();
-- show that citus local table shard is still on the coordinator
SELECT tablename FROM pg_catalog.pg_tables where tablename like 'citus_local_table_%';
@@ -34,12 +38,14 @@ SELECT pg_reload_conf();
SELECT pg_sleep(.1); -- wait to make sure the config has changed before running the GUC
SELECT master_drain_node('localhost', :master_port);
SELECT public.master_defer_delete_shards();
ALTER SYSTEM RESET citus.local_hostname;
SELECT pg_reload_conf();
SELECT pg_sleep(.1); -- wait to make sure the config has changed before running the GUC
SELECT master_drain_node('localhost', :master_port);
SELECT public.master_defer_delete_shards();
-- show that citus local table shard is still on the coordinator
SELECT tablename FROM pg_catalog.pg_tables where tablename like 'citus_local_table_%';
@@ -286,6 +292,7 @@ SELECT master_create_distributed_table('replication_test_table', 'int_column', '
CREATE VIEW replication_test_table_placements_per_node AS
SELECT count(*) FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard
WHERE logicalrelid = 'replication_test_table'::regclass
AND shardstate != 4
GROUP BY nodename, nodeport
ORDER BY nodename, nodeport;
@@ -364,6 +371,7 @@ SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'ap
CREATE VIEW table_placements_per_node AS
SELECT nodeport, logicalrelid::regclass, count(*)
FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard
WHERE shardstate != 4
GROUP BY logicalrelid::regclass, nodename, nodeport
ORDER BY logicalrelid::regclass, nodename, nodeport;
@@ -386,6 +394,7 @@ AS $$
pg_dist_shard_placement src USING (shardid),
(SELECT nodename, nodeport FROM pg_dist_shard_placement ORDER BY nodeport DESC LIMIT 1) dst
WHERE src.nodeport < dst.nodeport AND s.logicalrelid = rel::regclass;
SELECT public.master_defer_delete_shards();
$$;
CALL create_unbalanced_shards('rebalance_test_table');
@@ -417,6 +426,7 @@ FROM (
FROM pg_dist_shard
WHERE logicalrelid = 'rebalance_test_table'::regclass
) T;
SELECT public.master_defer_delete_shards();
ALTER SYSTEM RESET citus.local_hostname;
SELECT pg_reload_conf();
@@ -433,6 +443,7 @@ FROM (
FROM pg_dist_shard
WHERE logicalrelid = 'rebalance_test_table'::regclass
) T;
SELECT public.master_defer_delete_shards();
SELECT * FROM table_placements_per_node;
@@ -467,22 +478,26 @@ SELECT rebalance_table_shards('rebalance_test_table',
RESET ROLE;
-- Confirm no moves took place at all during these errors
SELECT * FROM table_placements_per_node;
SELECT public.master_defer_delete_shards();
SELECT rebalance_table_shards('rebalance_test_table',
threshold := 0, max_shard_moves := 1,
shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM table_placements_per_node;
-- Check that threshold=1 doesn't move any shards
SELECT rebalance_table_shards('rebalance_test_table', threshold := 1, shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM table_placements_per_node;
-- Move the remaining shards using threshold=0
SELECT rebalance_table_shards('rebalance_test_table', threshold := 0);
SELECT public.master_defer_delete_shards();
SELECT * FROM table_placements_per_node;
@@ -490,6 +505,7 @@ SELECT * FROM table_placements_per_node;
-- any effects.
SELECT rebalance_table_shards('rebalance_test_table', threshold := 0, shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM table_placements_per_node;
@@ -584,9 +600,11 @@ SELECT COUNT(*) FROM imbalanced_table;
-- Try force_logical
SELECT rebalance_table_shards('imbalanced_table', threshold:=0, shard_transfer_mode:='force_logical');
SELECT public.master_defer_delete_shards();
-- Test rebalance operation
SELECT rebalance_table_shards('imbalanced_table', threshold:=0, shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
-- Confirm rebalance
-- Shard counts in each node after rebalance
@@ -613,6 +631,7 @@ SELECT create_distributed_table('colocated_rebalance_test', 'id');
SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', 10000, 'block_writes')
FROM pg_dist_shard_placement
WHERE nodeport = :worker_2_port;
SELECT public.master_defer_delete_shards();
-- Try to move shards to a node where shards are not allowed
SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', false);
@@ -639,6 +658,7 @@ UPDATE pg_dist_node SET noderole = 'primary' WHERE nodeport = :worker_1_port;
SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes')
FROM pg_dist_shard_placement
WHERE nodeport = :worker_2_port;
SELECT public.master_defer_delete_shards();
SELECT create_distributed_table('colocated_rebalance_test2', 'id');
@@ -649,6 +669,7 @@ SELECT * FROM public.table_placements_per_node;
SELECT * FROM get_rebalance_table_shards_plan('colocated_rebalance_test', threshold := 0, drain_only := true);
-- Running with drain_only shouldn't do anything
SELECT * FROM rebalance_table_shards('colocated_rebalance_test', threshold := 0, shard_transfer_mode := 'block_writes', drain_only := true);
SELECT public.master_defer_delete_shards();
-- Confirm that nothing changed
SELECT * FROM public.table_placements_per_node;
@@ -661,6 +682,7 @@ SELECT * FROM get_rebalance_table_shards_plan('colocated_rebalance_test', rebala
SELECT * FROM get_rebalance_progress();
-- Actually do the rebalance
SELECT * FROM rebalance_table_shards('colocated_rebalance_test', threshold := 0, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
-- Check that we can call this function without a crash
SELECT * FROM get_rebalance_progress();
@@ -678,18 +700,22 @@ SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaves
SELECT * FROM get_rebalance_table_shards_plan('colocated_rebalance_test', threshold := 0);
SELECT * FROM rebalance_table_shards('colocated_rebalance_test', threshold := 0, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
SELECT * FROM get_rebalance_table_shards_plan('non_colocated_rebalance_test', threshold := 0);
SELECT * FROM rebalance_table_shards('non_colocated_rebalance_test', threshold := 0, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
-- Put shards back
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
SELECT * FROM rebalance_table_shards('colocated_rebalance_test', threshold := 0, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
SELECT * FROM rebalance_table_shards('non_colocated_rebalance_test', threshold := 0, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
-- testing behaviour when setting shouldhaveshards to false and rebalancing all
@@ -697,11 +723,13 @@ SELECT * FROM public.table_placements_per_node;
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
SELECT * FROM get_rebalance_table_shards_plan(threshold := 0, drain_only := true);
SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'block_writes', drain_only := true);
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
-- Put shards back
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
-- testing behaviour when setting shouldhaveshards to false and rebalancing all
@@ -709,11 +737,13 @@ SELECT * FROM public.table_placements_per_node;
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
SELECT * FROM get_rebalance_table_shards_plan(threshold := 0);
SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
-- Put shards back
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
-- Make it a data node again
@@ -721,12 +751,14 @@ SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaves
-- testing behaviour of master_drain_node
SELECT * from master_drain_node('localhost', :worker_2_port, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
select shouldhaveshards from pg_dist_node where nodeport = :worker_2_port;
SELECT * FROM public.table_placements_per_node;
-- Put shards back
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
SELECT * FROM rebalance_table_shards(threshold := 0, shard_transfer_mode := 'block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
@@ -795,12 +827,15 @@ SELECT * FROM get_rebalance_table_shards_plan('tab', rebalance_strategy := 'by_d
SELECT * FROM get_rebalance_table_shards_plan('tab', rebalance_strategy := 'by_disk_size', threshold := 0);
SELECT * FROM rebalance_table_shards('tab', shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
SELECT * FROM rebalance_table_shards('tab', rebalance_strategy := 'by_disk_size', shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
SELECT * FROM rebalance_table_shards('tab', rebalance_strategy := 'by_disk_size', shard_transfer_mode:='block_writes', threshold := 0);
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
-- Check that sizes of colocated tables are added together for rebalances
@@ -851,6 +886,7 @@ SELECT * FROM get_rebalance_table_shards_plan('tab', rebalance_strategy := 'by_d
-- supports improvement_threshold
SELECT * FROM get_rebalance_table_shards_plan('tab', rebalance_strategy := 'by_disk_size', improvement_threshold := 0);
SELECT * FROM rebalance_table_shards('tab', rebalance_strategy := 'by_disk_size', shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
ANALYZE tab, tab2;
@@ -907,11 +943,13 @@ SELECT citus_add_rebalance_strategy(
SELECT * FROM get_rebalance_table_shards_plan('tab', rebalance_strategy := 'capacity_high_worker_2');
SELECT * FROM rebalance_table_shards('tab', rebalance_strategy := 'capacity_high_worker_2', shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
SELECT citus_set_default_rebalance_strategy('capacity_high_worker_2');
SELECT * FROM get_rebalance_table_shards_plan('tab');
SELECT * FROM rebalance_table_shards('tab', shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
CREATE FUNCTION only_worker_1(shardid bigint, nodeidarg int)
@@ -932,6 +970,7 @@ SELECT citus_add_rebalance_strategy(
SELECT citus_set_default_rebalance_strategy('only_worker_1');
SELECT * FROM get_rebalance_table_shards_plan('tab');
SELECT * FROM rebalance_table_shards('tab', shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT * FROM public.table_placements_per_node;
SELECT citus_set_default_rebalance_strategy('by_shard_count');
@@ -940,14 +979,18 @@ SELECT * FROM get_rebalance_table_shards_plan('tab');
-- Check all the error handling cases
SELECT * FROM get_rebalance_table_shards_plan('tab', rebalance_strategy := 'non_existing');
SELECT * FROM rebalance_table_shards('tab', rebalance_strategy := 'non_existing');
SELECT public.master_defer_delete_shards();
SELECT * FROM master_drain_node('localhost', :worker_2_port, rebalance_strategy := 'non_existing');
SELECT public.master_defer_delete_shards();
SELECT citus_set_default_rebalance_strategy('non_existing');
UPDATE pg_dist_rebalance_strategy SET default_strategy=false;
SELECT * FROM get_rebalance_table_shards_plan('tab');
SELECT * FROM rebalance_table_shards('tab');
SELECT public.master_defer_delete_shards();
SELECT * FROM master_drain_node('localhost', :worker_2_port);
SELECT public.master_defer_delete_shards();
UPDATE pg_dist_rebalance_strategy SET default_strategy=true WHERE name='by_shard_count';
CREATE OR REPLACE FUNCTION shard_cost_no_arguments()
@@ -1172,6 +1215,7 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupId=>0);
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
SELECT rebalance_table_shards('rebalance_test_table', shard_transfer_mode:='block_writes');
SELECT public.master_defer_delete_shards();
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
@@ -1204,6 +1248,7 @@ INSERT INTO r2 VALUES (1,2), (3,4);
SELECT 1 from master_add_node('localhost', :worker_2_port);
SELECT rebalance_table_shards();
SELECT public.master_defer_delete_shards();
DROP TABLE t1, r1, r2;
@@ -1230,6 +1275,7 @@ WHERE logicalrelid = 'r1'::regclass;
-- rebalance with _only_ a reference table, this should trigger the copy
SELECT rebalance_table_shards();
SELECT public.master_defer_delete_shards();
-- verify the reference table is on all nodes after the rebalance
SELECT count(*)