Cleans up test outputs (#6434)

pull/6429/head
Naisila Puka 2022-10-17 15:13:07 +03:00 committed by GitHub
parent 82ea76bc0c
commit 8323f4f12c
23 changed files with 21 additions and 1504 deletions

View File

@@ -56,7 +56,6 @@ master_update_node
step s2-abort: ABORT;
step s1-abort: ABORT;
FATAL: terminating connection due to administrator command
FATAL: terminating connection due to administrator command
SSL connection has been closed unexpectedly
master_remove_node

View File

@@ -2,7 +2,9 @@ Parsed test spec with 2 sessions
starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
@@ -17,17 +19,23 @@ step s2-update-node-1:
step s1-abort: ABORT;
step s2-update-node-1: <... completed>
master_update_node
---------------------------------------------------------------------
(1 row)
step s2-abort: ABORT;
master_remove_node
---------------------------------------------------------------------
(2 rows)
starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
@@ -41,15 +49,19 @@ step s2-update-node-1-force:
<waiting ...>
step s2-update-node-1-force: <... completed>
master_update_node
---------------------------------------------------------------------
(1 row)
step s2-abort: ABORT;
step s1-abort: ABORT;
WARNING: this step had a leftover error message
FATAL: terminating connection due to administrator command
server closed the connection unexpectedly
FATAL: terminating connection due to administrator command
SSL connection has been closed unexpectedly
master_remove_node
---------------------------------------------------------------------
(2 rows)

View File

@@ -56,7 +56,7 @@ master_update_node
step s2-abort: ABORT;
step s1-abort: ABORT;
FATAL: terminating connection due to administrator command
SSL connection has been closed unexpectedly
server closed the connection unexpectedly
master_remove_node
---------------------------------------------------------------------

View File

@@ -1,66 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1:
-- update a specific node by address
SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
<waiting ...>
step s1-abort: ABORT;
step s2-update-node-1: <... completed>
master_update_node
---------------------------------------------------------------------
(1 row)
step s2-abort: ABORT;
master_remove_node
---------------------------------------------------------------------
(2 rows)
starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1-force:
-- update a specific node by address (force)
SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
<waiting ...>
step s2-update-node-1-force: <... completed>
master_update_node
---------------------------------------------------------------------
(1 row)
step s2-abort: ABORT;
step s1-abort: ABORT;
FATAL: terminating connection due to administrator command
server closed the connection unexpectedly
master_remove_node
---------------------------------------------------------------------
(2 rows)

View File

@@ -1,499 +0,0 @@
Parsed test spec with 3 sessions
starting permutation: s2-add-fkey s1-insert-referenced s1-insert-referencing s1-begin s2-begin s2-move-placement-blocking s1-delete s2-commit s1-commit s2-print-cluster
step s2-add-fkey:
ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE;
step s1-insert-referenced:
INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-insert-referencing:
INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-move-placement-blocking:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes');
master_move_shard_placement
step s1-delete:
DELETE FROM referenced_table WHERE id < 5;
<waiting ...>
step s2-commit:
COMMIT;
step s1-delete: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('referencing_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT * FROM referencing_table ORDER BY 1;
nodeport shardid success result
57637 102159 t 4
57637 102160 t 2
id value
5 5
6 6
7 7
8 8
9 9
10 10
starting permutation: s2-add-fkey s1-insert-referenced s1-insert-referencing s1-begin s2-begin s2-move-placement-blocking s1-update s2-commit s1-commit s2-print-cluster
step s2-add-fkey:
ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE;
step s1-insert-referenced:
INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-insert-referencing:
INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-move-placement-blocking:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes');
master_move_shard_placement
step s1-update:
UPDATE referenced_table SET value = 5 WHERE id = 5;
<waiting ...>
step s2-commit:
COMMIT;
step s1-update: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('referencing_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT * FROM referencing_table ORDER BY 1;
nodeport shardid success result
57637 102162 t 7
57637 102163 t 3
id value
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
10 10
starting permutation: s2-add-fkey s1-insert-referenced s1-insert-referencing s1-begin s2-begin s2-move-placement-blocking s1-ddl s2-commit s1-commit s2-print-cluster
step s2-add-fkey:
ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE;
step s1-insert-referenced:
INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-insert-referencing:
INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-move-placement-blocking:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes');
master_move_shard_placement
step s1-ddl:
CREATE INDEX referenced_table_index ON referenced_table(id);
<waiting ...>
step s2-commit:
COMMIT;
step s1-ddl: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('referencing_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT * FROM referencing_table ORDER BY 1;
nodeport shardid success result
57637 102165 t 7
57637 102166 t 3
id value
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
10 10
starting permutation: s2-add-fkey s1-insert-referenced s1-begin s2-begin s2-move-placement-blocking s1-insert-referencing s2-commit s1-commit s2-print-cluster
step s2-add-fkey:
ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE;
step s1-insert-referenced:
INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-move-placement-blocking:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes');
master_move_shard_placement
step s1-insert-referencing:
INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x);
<waiting ...>
step s2-commit:
COMMIT;
step s1-insert-referencing: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('referencing_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT * FROM referencing_table ORDER BY 1;
nodeport shardid success result
57637 102168 t 7
57637 102169 t 3
id value
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
10 10
starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s1-insert-referencing s2-begin s2-move-placement-nonblocking s1-delete s3-release-advisory-lock s2-commit s2-print-cluster
step s2-add-fkey:
ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE;
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
step s1-insert-referenced:
INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-insert-referencing:
INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x);
step s2-begin:
BEGIN;
step s2-move-placement-nonblocking:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637);
master_move_shard_placement
step s1-delete:
DELETE FROM referenced_table WHERE id < 5;
<waiting ...>
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
t
step s2-commit:
COMMIT;
step s1-delete: <... completed>
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('referencing_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT * FROM referencing_table ORDER BY 1;
nodeport shardid success result
57637 102171 t 4
57637 102172 t 2
id value
5 5
6 6
7 7
8 8
9 9
10 10
starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s1-insert-referencing s2-begin s2-move-placement-nonblocking s1-update s3-release-advisory-lock s2-commit s2-print-cluster
step s2-add-fkey:
ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE;
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
step s1-insert-referenced:
INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-insert-referencing:
INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x);
step s2-begin:
BEGIN;
step s2-move-placement-nonblocking:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637);
master_move_shard_placement
step s1-update:
UPDATE referenced_table SET value = 5 WHERE id = 5;
<waiting ...>
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
t
step s2-commit:
COMMIT;
step s1-update: <... completed>
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('referencing_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT * FROM referencing_table ORDER BY 1;
nodeport shardid success result
57637 102174 t 7
57637 102175 t 3
id value
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
10 10
starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s1-insert-referencing s2-begin s2-move-placement-nonblocking s1-ddl s3-release-advisory-lock s2-commit s2-print-cluster
step s2-add-fkey:
ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE;
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
step s1-insert-referenced:
INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x);
step s1-insert-referencing:
INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x);
step s2-begin:
BEGIN;
step s2-move-placement-nonblocking:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637);
master_move_shard_placement
step s1-ddl:
CREATE INDEX referenced_table_index ON referenced_table(id);
<waiting ...>
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
t
step s2-commit:
COMMIT;
step s1-ddl: <... completed>
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('referencing_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT * FROM referencing_table ORDER BY 1;
nodeport shardid success result
57637 102177 t 7
57637 102178 t 3
id value
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
10 10
starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s2-begin s2-move-placement-nonblocking s1-insert-referencing s3-release-advisory-lock s2-commit s2-print-cluster
step s2-add-fkey:
ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE;
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
step s1-insert-referenced:
INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x);
step s2-begin:
BEGIN;
step s2-move-placement-nonblocking:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637);
master_move_shard_placement
step s1-insert-referencing:
INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x);
<waiting ...>
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
t
step s2-commit:
COMMIT;
step s1-insert-referencing: <... completed>
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('referencing_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT * FROM referencing_table ORDER BY 1;
nodeport shardid success result
57637 102180 t 7
57637 102181 t 3
id value
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
10 10

View File

@@ -1,251 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-delete s2-commit s1-commit s2-print-cluster
create_distributed_table
step s2-add-fkey:
ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5);
isolate_tenant_to_new_shard
102258
step s1-delete:
DELETE FROM reference_table WHERE id = 5;
<waiting ...>
step s2-commit:
COMMIT;
step s1-delete: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport shardid success result
57637 102257 t 0
57637 102258 t 0
57637 102259 t 0
57638 102256 t 0
id value
starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-update s2-commit s1-commit s2-print-cluster
create_distributed_table
step s2-add-fkey:
ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5);
isolate_tenant_to_new_shard
102264
step s1-update:
UPDATE reference_table SET value = 5 WHERE id = 5;
<waiting ...>
step s2-commit:
COMMIT;
step s1-update: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport shardid success result
57637 102263 t 0
57637 102264 t 0
57637 102265 t 0
57638 102262 t 0
id value
starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-insert s2-commit s1-commit s2-print-cluster
create_distributed_table
step s2-add-fkey:
ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5);
isolate_tenant_to_new_shard
102270
step s1-insert:
INSERT INTO reference_table VALUES (5, 10);
<waiting ...>
step s2-commit:
COMMIT;
step s1-insert: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport shardid success result
57637 102269 t 0
57637 102270 t 0
57637 102271 t 0
57638 102268 t 0
id value
starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-copy s2-commit s1-commit s2-print-cluster
create_distributed_table
step s2-add-fkey:
ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5);
isolate_tenant_to_new_shard
102276
step s1-copy:
COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
<waiting ...>
step s2-commit:
COMMIT;
step s1-copy: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport shardid success result
57637 102275 t 0
57637 102276 t 0
57637 102277 t 0
57638 102274 t 0
id value
starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-ddl s2-commit s1-commit s2-print-cluster
create_distributed_table
step s2-add-fkey:
ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5);
isolate_tenant_to_new_shard
102282
step s1-ddl:
CREATE INDEX reference_table_index ON reference_table(id);
<waiting ...>
step s2-commit:
COMMIT;
step s1-ddl: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport shardid success result
57637 102281 t 0
57637 102282 t 0
57637 102283 t 0
57638 102280 t 0
id value

View File

@@ -1,121 +0,0 @@
-- File to create functions and helpers needed for split shard tests
-- Populates shared memory mapping for parent shard with id 1.
-- targetNode1, targetNode2 are the locations where child shard xxxxx and 3 are placed respectively
CREATE OR REPLACE FUNCTION split_shard_replication_setup_helper(targetNode1 integer, targetNode2 integer) RETURNS text AS $$
DECLARE
memoryId bigint := 0;
memoryIdText text;
begin
SELECT * into memoryId from worker_split_shard_replication_setup(ARRAY[ARRAY[1,2,-2147483648,-1, targetNode1], ARRAY[1,3,0,2147483647,targetNode2]]);
SELECT FORMAT('%s', memoryId) into memoryIdText;
return memoryIdText;
end
$$ LANGUAGE plpgsql;
-- Create replication slots for targetNode1 and targetNode2 incase of non-colocated shards
CREATE OR REPLACE FUNCTION create_replication_slot(targetNode1 integer, targetNode2 integer) RETURNS text AS $$
DECLARE
targetOneSlotName text;
targetTwoSlotName text;
sharedMemoryId text;
derivedSlotName text;
begin
SELECT * into sharedMemoryId from public.split_shard_replication_setup_helper(targetNode1, targetNode2);
SELECT FORMAT('citus_split_%s_%s_10', targetNode1, sharedMemoryId) into derivedSlotName;
SELECT slot_name into targetOneSlotName from pg_create_logical_replication_slot(derivedSlotName, 'decoding_plugin_for_shard_split');
-- if new child shards are placed on different nodes, create one more replication slot
if (targetNode1 != targetNode2) then
SELECT FORMAT('citus_split_%s_%s_10', targetNode2, sharedMemoryId) into derivedSlotName;
SELECT slot_name into targetTwoSlotName from pg_create_logical_replication_slot(derivedSlotName, 'decoding_plugin_for_shard_split');
INSERT INTO slotName_table values(targetTwoSlotName, targetNode2, 1);
end if;
INSERT INTO slotName_table values(targetOneSlotName, targetNode1, 2);
return targetOneSlotName;
end
$$ LANGUAGE plpgsql;
-- Populates shared memory mapping for colocated parent shards 4 and 7.
-- shard xxxxx has child shards 5 and 6. Shard 7 has child shards 8 and 9.
CREATE OR REPLACE FUNCTION split_shard_replication_setup_for_colocated_shards(targetNode1 integer, targetNode2 integer) RETURNS text AS $$
DECLARE
memoryId bigint := 0;
memoryIdText text;
begin
SELECT * into memoryId from worker_split_shard_replication_setup(
ARRAY[
ARRAY[4, 5, -2147483648,-1, targetNode1],
ARRAY[4, 6, 0 ,2147483647, targetNode2],
ARRAY[7, 8, -2147483648,-1, targetNode1],
ARRAY[7, 9, 0, 2147483647 , targetNode2]
]);
SELECT FORMAT('%s', memoryId) into memoryIdText;
return memoryIdText;
end
$$ LANGUAGE plpgsql;
-- Create replication slots for targetNode1 and targetNode2 incase of colocated shards
CREATE OR REPLACE FUNCTION create_replication_slot_for_colocated_shards(targetNode1 integer, targetNode2 integer) RETURNS text AS $$
DECLARE
targetOneSlotName text;
targetTwoSlotName text;
sharedMemoryId text;
derivedSlotNameOne text;
derivedSlotNameTwo text;
tableOwnerOne bigint;
tableOwnerTwo bigint;
begin
-- setup shared memory information
SELECT * into sharedMemoryId from public.split_shard_replication_setup_for_colocated_shards(targetNode1, targetNode2);
SELECT relowner into tableOwnerOne from pg_class where relname='table_first';
SELECT FORMAT('citus_split_%s_%s_%s', targetNode1, sharedMemoryId, tableOwnerOne) into derivedSlotNameOne;
SELECT slot_name into targetOneSlotName from pg_create_logical_replication_slot(derivedSlotNameOne, 'decoding_plugin_for_shard_split');
SELECT relowner into tableOwnerTwo from pg_class where relname='table_second';
SELECT FORMAT('citus_split_%s_%s_%s', targetNode2, sharedMemoryId, tableOwnerTwo) into derivedSlotNameTwo;
SELECT slot_name into targetTwoSlotName from pg_create_logical_replication_slot(derivedSlotNameTwo, 'decoding_plugin_for_shard_split');
INSERT INTO slotName_table values(targetOneSlotName, targetNode1, 1);
INSERT INTO slotName_table values(targetTwoSlotName, targetNode2, 2);
return targetOneSlotName;
end
$$ LANGUAGE plpgsql;
-- create subscription on target node with given 'subscriptionName'
CREATE OR REPLACE FUNCTION create_subscription(targetNodeId integer, subscriptionName text) RETURNS text AS $$
DECLARE
replicationSlotName text;
nodeportLocal int;
subname text;
begin
SELECT name into replicationSlotName from slotName_table where nodeId = targetNodeId;
EXECUTE FORMAT($sub$create subscription %s connection 'host=localhost port=xxxxx user=postgres dbname=regression' publication PUB1 with(create_slot=false, enabled=true, slot_name='%s', copy_data=false)$sub$, subscriptionName, replicationSlotName);
return replicationSlotName;
end
$$ LANGUAGE plpgsql;
-- create subscription on target node with given 'subscriptionName'
CREATE OR REPLACE FUNCTION create_subscription_for_owner_one(targetNodeId integer, subscriptionName text) RETURNS text AS $$
DECLARE
replicationSlotName text;
nodeportLocal int;
subname text;
begin
SELECT name into replicationSlotName from slotName_table where id = 1;
EXECUTE FORMAT($sub$create subscription %s connection 'host=localhost port=xxxxx user=postgres dbname=regression' publication PUB1 with(create_slot=false, enabled=true, slot_name='%s', copy_data=false)$sub$, subscriptionName, replicationSlotName);
return replicationSlotName;
end
$$ LANGUAGE plpgsql;
-- create subscription on target node with given 'subscriptionName'
CREATE OR REPLACE FUNCTION create_subscription_for_owner_two(targetNodeId integer, subscriptionName text) RETURNS text AS $$
DECLARE
replicationSlotName text;
nodeportLocal int;
subname text;
begin
SELECT name into replicationSlotName from slotName_table where id = 2;
EXECUTE FORMAT($sub$create subscription %s connection 'host=localhost port=xxxxx user=postgres dbname=regression' publication PUB2 with(create_slot=false, enabled=true, slot_name='%s', copy_data=false)$sub$, subscriptionName, replicationSlotName);
return replicationSlotName;
end
$$ LANGUAGE plpgsql;
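
For orientation, the PL/pgSQL helpers deleted above were meant to be driven from the split-shard replication tests roughly as sketched below. This is an illustrative sketch inferred from the function bodies only; the slotName_table definition, the SUB1 subscription name, and the :worker_*_node psql variables are assumptions rather than content of this commit.

-- Sketch only (not part of the commit): a plausible driver for the removed helpers.
-- create_replication_slot() INSERTs into slotName_table, so the table must exist first.
CREATE TABLE slotName_table (name text, nodeId int, id int);
-- On the source worker: register the split in shared memory and create the logical slot(s).
SELECT public.create_replication_slot(:worker_1_node, :worker_2_node);
-- On a target worker: attach a subscription to the recorded slot
-- (the connection string and publication PUB1 are hardcoded inside the helper).
SELECT public.create_subscription(:worker_2_node, 'SUB1');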

View File

@@ -1,13 +0,0 @@
ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
SET client_min_messages TO ERROR;
SELECT stop_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
stop_metadata_sync_to_node
---------------------------------------------------------------------
(0 rows)

View File

@@ -1,15 +0,0 @@
ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
SET client_min_messages TO ERROR;
SELECT stop_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
stop_metadata_sync_to_node
---------------------------------------------------------------------
(2 rows)

View File

@@ -1,16 +0,0 @@
ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
SET client_min_messages TO ERROR;
SELECT stop_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
stop_metadata_sync_to_node
---------------------------------------------------------------------
(3 rows)

View File

@@ -1,21 +0,0 @@
ALTER SYSTEM SET citus.enable_metadata_sync TO ON;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
SELECT pg_sleep(0.1);
pg_sleep
---------------------------------------------------------------------
(1 row)
SET client_min_messages TO ERROR;
SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
start_metadata_sync_to_node
---------------------------------------------------------------------
(2 rows)

View File

@@ -1,22 +0,0 @@
ALTER SYSTEM SET citus.enable_metadata_sync TO ON;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
SELECT pg_sleep(0.1);
pg_sleep
---------------------------------------------------------------------
(1 row)
SET client_min_messages TO ERROR;
SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
start_metadata_sync_to_node
---------------------------------------------------------------------
(3 rows)

View File

@@ -1,19 +0,0 @@
ALTER SYSTEM SET citus.enable_metadata_sync TO ON;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)
SELECT pg_sleep(0.1);
pg_sleep
---------------------------------------------------------------------
(1 row)
SET client_min_messages TO ERROR;
SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
start_metadata_sync_to_node
---------------------------------------------------------------------
(0 rows)

View File

@@ -1,227 +0,0 @@
CREATE SCHEMA worker_shard_binary_copy_test;
SET search_path TO worker_shard_binary_copy_test;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81060000;
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
SELECT create_distributed_table('shard_to_split_copy', 'l_orderkey');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\COPY shard_to_split_copy FROM STDIN WITH DELIMITER '|'
-- END: Create distributed table and insert data.
-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy.
\c - - - :worker_1_port
CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy_81060015 (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy_81060016 (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy.
-- BEGIN: Switch to Worker2, Create target shards in worker for remote 2-way split copy.
\c - - - :worker_2_port
CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy_81060015 (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy_81060016 (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
-- End: Switch to Worker2, Create target shards in worker for remote 2-way split copy.
-- BEGIN: List row count for source shard and targets shard in Worker1.
\c - - - :worker_1_port
SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060000;
count
---------------------------------------------------------------------
22
(1 row)
SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060015;
count
---------------------------------------------------------------------
0
(1 row)
SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060016;
count
---------------------------------------------------------------------
0
(1 row)
-- END: List row count for source shard and targets shard in Worker1.
-- BEGIN: List row count for target shard in Worker2.
\c - - - :worker_2_port
SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060015;
count
---------------------------------------------------------------------
0
(1 row)
SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060016;
count
---------------------------------------------------------------------
0
(1 row)
-- END: List row count for targets shard in Worker2.
-- BEGIN: Set worker_1_node and worker_2_node
\c - - - :worker_1_port
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-- END: Set worker_1_node and worker_2_node
-- BEGIN: Trigger 2-way local shard split copy.
-- Ensure we will perform binary copy.
SET citus.enable_binary_protocol = TRUE;
SELECT * from worker_split_copy(
81060000, -- source shard id to copy
ARRAY[
-- split copy info for split children 1
ROW(81060015, -- destination shard id
-2147483648, -- split range begin
1073741823, --split range end
:worker_1_node)::pg_catalog.split_copy_info,
-- split copy info for split children 2
ROW(81060016, --destination shard id
1073741824, --split range begin
2147483647, --split range end
:worker_1_node)::pg_catalog.split_copy_info
]
);
worker_split_copy
---------------------------------------------------------------------
(1 row)
-- END: Trigger 2-way local shard split copy.
-- BEGIN: Trigger 2-way remote shard split copy.
SELECT * from worker_split_copy(
81060000, -- source shard id to copy
ARRAY[
-- split copy info for split children 1
ROW(81060015, -- destination shard id
-2147483648, -- split range begin
1073741823, --split range end
:worker_2_node)::pg_catalog.split_copy_info,
-- split copy info for split children 2
ROW(81060016, --destination shard id
1073741824, --split range begin
2147483647, --split range end
:worker_2_node)::pg_catalog.split_copy_info
]
);
worker_split_copy
---------------------------------------------------------------------
(1 row)
-- END: Trigger 2-way remote shard split copy.
-- BEGIN: List updated row count for local targets shard.
SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060015;
count
---------------------------------------------------------------------
21
(1 row)
SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060016;
count
---------------------------------------------------------------------
1
(1 row)
-- END: List updated row count for local targets shard.
-- BEGIN: List updated row count for remote targets shard.
\c - - - :worker_2_port
SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060015;
count
---------------------------------------------------------------------
21
(1 row)
SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060016;
count
---------------------------------------------------------------------
1
(1 row)
-- END: List updated row count for remote targets shard.
-- BEGIN: CLEANUP.
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA citus_split_shard_by_split_points_local CASCADE;
ERROR: schema "citus_split_shard_by_split_points_local" does not exist
-- END: CLEANUP.
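
A note on the ERROR recorded just above: this test creates the schema worker_shard_binary_copy_test, but its cleanup drops citus_split_shard_by_split_points_local, which does not exist here, so the expected output ends with a "does not exist" error (the text-copy variant below ends the same way). A cleanup matching what the test actually creates would presumably be the single statement sketched here; this is an editorial illustration, not part of the commit.

-- Hypothetical cleanup matching the schema created at the top of this test (sketch only).
DROP SCHEMA worker_shard_binary_copy_test CASCADE;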

View File

@@ -1,227 +0,0 @@
CREATE SCHEMA worker_shard_text_copy_test;
SET search_path TO worker_shard_text_copy_test;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81070000;
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
SELECT create_distributed_table('shard_to_split_copy', 'l_orderkey');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\COPY shard_to_split_copy FROM STDIN WITH DELIMITER '|'
-- END: Create distributed table and insert data.
-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy.
\c - - - :worker_1_port
CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy_81070015 (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy_81070016 (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy.
-- BEGIN: Switch to Worker2, Create target shards in worker for remote 2-way split copy.
\c - - - :worker_2_port
CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy_81070015 (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy_81070016 (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
-- End: Switch to Worker2, Create target shards in worker for remote 2-way split copy.
-- BEGIN: List row count for source shard and targets shard in Worker1.
\c - - - :worker_1_port
SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070000;
count
---------------------------------------------------------------------
22
(1 row)
SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070015;
count
---------------------------------------------------------------------
0
(1 row)
SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070016;
count
---------------------------------------------------------------------
0
(1 row)
-- END: List row count for source shard and targets shard in Worker1.
-- BEGIN: List row count for target shard in Worker2.
\c - - - :worker_2_port
SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070015;
count
---------------------------------------------------------------------
0
(1 row)
SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070016;
count
---------------------------------------------------------------------
0
(1 row)
-- END: List row count for targets shard in Worker2.
-- BEGIN: Set worker_1_node and worker_2_node
\c - - - :worker_1_port
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-- END: Set worker_1_node and worker_2_node
-- BEGIN: Trigger 2-way local shard split copy.
-- Ensure we will perform text copy.
SET citus.enable_binary_protocol = FALSE;
SELECT * from worker_split_copy(
81070000, -- source shard id to copy
ARRAY[
-- split copy info for split children 1
ROW(81070015, -- destination shard id
-2147483648, -- split range begin
1073741823, --split range end
:worker_1_node)::pg_catalog.split_copy_info,
-- split copy info for split children 2
ROW(81070016, --destination shard id
1073741824, --split range begin
2147483647, --split range end
:worker_1_node)::pg_catalog.split_copy_info
]
);
worker_split_copy
---------------------------------------------------------------------
(1 row)
-- END: Trigger 2-way local shard split copy.
-- BEGIN: Trigger 2-way remote shard split copy.
SELECT * from worker_split_copy(
81070000, -- source shard id to copy
ARRAY[
-- split copy info for split children 1
ROW(81070015, -- destination shard id
-2147483648, -- split range begin
1073741823, --split range end
:worker_2_node)::pg_catalog.split_copy_info,
-- split copy info for split children 2
ROW(81070016, --destination shard id
1073741824, --split range begin
2147483647, --split range end
:worker_2_node)::pg_catalog.split_copy_info
]
);
worker_split_copy
---------------------------------------------------------------------
(1 row)
-- END: Trigger 2-way remote shard split copy.
-- BEGIN: List updated row count for local targets shard.
SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070015;
count
---------------------------------------------------------------------
21
(1 row)
SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070016;
count
---------------------------------------------------------------------
1
(1 row)
-- END: List updated row count for local targets shard.
-- BEGIN: List updated row count for remote targets shard.
\c - - - :worker_2_port
SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070015;
count
---------------------------------------------------------------------
21
(1 row)
SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070016;
count
---------------------------------------------------------------------
1
(1 row)
-- END: List updated row count for remote targets shard.
-- BEGIN: CLEANUP.
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA citus_split_shard_by_split_points_local CASCADE;
ERROR: schema "citus_split_shard_by_split_points_local" does not exist
-- END: CLEANUP.

View File

@@ -290,7 +290,7 @@ test: auto_undist_citus_local
test: mx_regular_user
test: citus_locks
test: global_cancel
test: sequencenes_owned_by
test: sequences_owned_by
test: remove_coordinator
# ----------

View File

@@ -19,7 +19,6 @@
# remove / add node operations, we do not want any preexisting objects before
# propagate_extension_commands
# ---
test: turn_mx_off
test: multi_test_helpers
test: multi_test_catalog_views
test: multi_name_resolution

View File

@@ -19,7 +19,6 @@
# remove / add node operations, we do not want any preexisting objects before
# propagate_extension_commands
# ---
test: turn_mx_off
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
test: multi_name_lengths

View File

@@ -1,3 +1,8 @@
// Three alternative test outputs:
// isolation_master_update_node.out for PG15
// isolation_master_update_node_0.out for PG14
// isolation_master_update_node_1.out for PG13
setup
{
-- revert back to pg_isolation_test_session_is_blocked until the tests are fixed