Change citus truncate trigger to AFTER and add more upgrade tests (#3070)

* Add more upgrade tests

* Fix citus trigger generation after upgrade

citus_truncate_trigger runs before truncate when created by create_distributed_table:
492d1b2cba/src/backend/distributed/commands/create_distributed_table.c (L1163)

* Remove pg_dist_jobid_seq
pull/3082/head
Jelte Fennema 2019-10-07 16:43:04 +02:00 committed by GitHub
parent bffd110446
commit 01da11f264
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
49 changed files with 896 additions and 262 deletions

View File

@ -1160,7 +1160,7 @@ CreateTruncateTrigger(Oid relationId)
trigger->funcname = SystemFuncName("citus_truncate_trigger");
trigger->args = NIL;
trigger->row = false;
trigger->timing = TRIGGER_TYPE_BEFORE;
trigger->timing = TRIGGER_TYPE_AFTER;
trigger->events = TRIGGER_TYPE_TRUNCATE;
trigger->columns = NIL;
trigger->whenClause = NULL;

View File

@ -174,4 +174,41 @@ COMMENT ON FUNCTION worker_apply_sequence_command(text,regtype)
#include "udfs/citus_isolation_test_session_is_blocked/9.0-1.sql"
-- Recreate every citus truncate trigger on distributed tables so it fires
-- AFTER TRUNCATE instead of BEFORE TRUNCATE. Triggers created by older
-- versions of create_distributed_table used BEFORE timing (see the
-- CreateTruncateTrigger change in this same commit); new triggers are
-- created with AFTER, so existing ones must be rebuilt on upgrade.
CREATE FUNCTION ensure_truncate_trigger_is_after()
RETURNS void
LANGUAGE plpgsql
SET search_path = pg_catalog
AS $$
DECLARE
    table_name regclass;
    trigger_name text;
BEGIN
    --
    -- Rebuild the citus truncate trigger of each distributed table.
    -- (The previous pg_class join carried no predicates and was a no-op,
    -- so it is dropped here.)
    --
    FOR table_name, trigger_name IN
        SELECT tgrelid::regclass, tgname
        FROM pg_dist_partition
        JOIN pg_trigger ON tgrelid = logicalrelid
        WHERE tgname LIKE 'truncate_trigger_%'
          AND tgfoid = 'citus_truncate_trigger'::regproc
    LOOP
        -- regclass text output is already schema-qualified and quoted as
        -- needed (%s); the trigger name is quoted via %I.
        EXECUTE format('DROP TRIGGER %I ON %s', trigger_name, table_name);
        EXECUTE format(
            'CREATE TRIGGER %I AFTER TRUNCATE ON %s '
            'EXECUTE PROCEDURE pg_catalog.citus_truncate_trigger()',
            trigger_name, table_name);
        -- create_distributed_table marks its trigger internal with a direct
        -- catalog update, so mirror that here. Restrict by tgrelid as well:
        -- matching on tgname alone could flip tgisinternal on an identically
        -- named trigger of an unrelated table.
        UPDATE pg_trigger
        SET tgisinternal = true
        WHERE tgname = trigger_name
          AND tgrelid = table_name;
    END LOOP;
END;
$$;
SELECT ensure_truncate_trigger_is_after();
DROP FUNCTION ensure_truncate_trigger_is_after;
-- This sequence is unused
DROP SEQUENCE pg_catalog.pg_dist_jobid_seq;
RESET search_path;

View File

@ -1,3 +1,3 @@
# this schedule is to be run only on coordinators
test: after_citus_upgrade_coord
test: upgrade_basic_after

View File

@ -1 +1 @@
test: upgrade_distributed_table_after
test: upgrade_basic_after upgrade_type_after upgrade_ref2ref_after

View File

@ -1,3 +1,3 @@
# this schedule is to be run only on coordinators
test: before_citus_upgrade_coord
test: upgrade_basic_before

View File

@ -1 +1,3 @@
test: upgrade_distributed_table_before
# The basic tests run analyze which depends on shard numbers
test: upgrade_basic_before
test: upgrade_type_before upgrade_ref2ref_before

View File

@ -1,38 +0,0 @@
SET search_path TO before_citus_upgrade_coord, public;
SELECT * FROM t ORDER BY a;
a
---
1
2
3
4
5
(5 rows)
SELECT * FROM t WHERE a = 1;
a
---
1
(1 row)
INSERT INTO t SELECT * FROM generate_series(10, 15);
SELECT * FROM t WHERE a = 10;
a
----
10
(1 row)
SELECT * FROM t WHERE a = 11;
a
----
11
(1 row)
TRUNCATE TABLE t;
SELECT * FROM T;
a
---
(0 rows)
DROP TABLE t;
DROP SCHEMA before_citus_upgrade_coord CASCADE;

View File

@ -1,10 +0,0 @@
CREATE SCHEMA before_citus_upgrade_coord;
SET search_path TO before_citus_upgrade_coord, public;
CREATE TABLE t(a int);
SELECT create_distributed_table('t', 'a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t SELECT * FROM generate_series(1, 5);

View File

@ -29,11 +29,11 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
274 273 f
276 275 f
transactionnumberwaitingtransactionnumbers
273
274 273
275
276 275
step s1-abort:
ABORT;
@ -77,14 +77,14 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
278 277 f
279 277 f
279 278 t
280 279 f
281 279 f
281 280 t
transactionnumberwaitingtransactionnumbers
277
278 277
279 277,278
279
280 279
281 279,280
step s1-abort:
ABORT;

View File

@ -6,7 +6,6 @@
-- It'd be nice to script generation of this file, but alas, that's
-- not done yet.
SET citus.next_shard_id TO 580000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 580000;
CREATE SCHEMA test;
CREATE OR REPLACE FUNCTION test.maintenance_worker(p_dbname text DEFAULT current_database())
RETURNS pg_stat_activity

View File

@ -12,7 +12,6 @@ SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
(1 row)
SET citus.next_shard_id TO 1420000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1420000;
SET citus.shard_replication_factor TO 1;
ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;

View File

@ -12,7 +12,6 @@ SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
(1 row)
SET citus.next_shard_id TO 1420000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1420000;
SET citus.shard_replication_factor TO 1;
CREATE TABLE test (id integer, val integer);
SELECT create_distributed_table('test', 'id');

View File

@ -5,7 +5,6 @@
-- To find the relation size and total relation size citus_relation_size and
-- citus_total_relation_size are also tested.
SET citus.next_shard_id TO 1460000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1460000;
SET citus.shard_replication_factor to 1;
CREATE TABLE test_table_1_rf1(id int, val_1 int);
SELECT create_distributed_table('test_table_1_rf1','id');

View File

@ -3,7 +3,6 @@
-- this file is intended to test multi shard update/delete queries
--
SET citus.next_shard_id TO 1440000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1440000;
SET citus.shard_replication_factor to 1;
SET citus.multi_shard_modify_mode to 'parallel';
CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int);

View File

@ -3,7 +3,6 @@
-- this file is intended to test multi shard update/delete queries
--
SET citus.next_shard_id TO 1440000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1440000;
SET citus.shard_replication_factor to 1;
SET citus.multi_shard_modify_mode to 'parallel';
CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int);

View File

@ -5,7 +5,6 @@
-- To find the relation size and total relation size citus_relation_size and
-- citus_total_relation_size are also tested.
SET citus.next_shard_id TO 1390000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1390000;
-- Tests with invalid relation IDs
SELECT citus_table_size(1);
ERROR: could not compute table size: relation does not exist

View File

@ -5,7 +5,6 @@
--
-- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests
-- SET citus.next_shard_id TO 1400000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000;
--
-- UNIONs and JOINs mixed

View File

@ -5,7 +5,6 @@
--
-- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests
-- SET citus.next_shard_id TO 1400000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000;
CREATE TABLE user_buy_test_table(user_id int, item_id int, buy_count int);
SELECT create_distributed_table('user_buy_test_table', 'user_id');
create_distributed_table

View File

@ -2,7 +2,6 @@
-- MULTI_TASK_STRING_SIZE
--
SET citus.next_shard_id TO 1602000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1602000;
CREATE TABLE wide_table
(
long_column_001 int,

View File

@ -0,0 +1,348 @@
SET search_path TO upgrade_basic, public, pg_catalog;
BEGIN;
SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' ORDER BY tablename;
schemaname | tablename | indexname | tablespace | indexdef
---------------+-----------+-----------+------------+-----------------------------------------------------------------
upgrade_basic | r | r_pkey | | CREATE UNIQUE INDEX r_pkey ON upgrade_basic.r USING btree (a)
upgrade_basic | t | t_a_idx | | CREATE INDEX t_a_idx ON upgrade_basic.t USING hash (a)
upgrade_basic | tp | tp_pkey | | CREATE UNIQUE INDEX tp_pkey ON upgrade_basic.tp USING btree (a)
(3 rows)
SELECT nextval('pg_dist_shardid_seq') = MAX(shardid)+1 FROM pg_dist_shard;
?column?
----------
t
(1 row)
SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg_dist_placement;
?column?
----------
t
(1 row)
SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node;
?column?
----------
t
(1 row)
SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node;
?column?
----------
t
(1 row)
SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation;
?column?
----------
t
(1 row)
-- If this query gives output it means we've added a new sequence that should
-- possibly be restored after upgrades.
SELECT sequence_name FROM information_schema.sequences
WHERE sequence_name LIKE 'pg_dist_%'
AND sequence_name NOT IN (
-- these ones are restored above
'pg_dist_shardid_seq',
'pg_dist_placement_placementid_seq',
'pg_dist_groupid_seq',
'pg_dist_node_nodeid_seq',
'pg_dist_colocationid_seq'
);
sequence_name
---------------
(0 rows)
SELECT logicalrelid FROM pg_dist_partition
JOIN pg_depend ON logicalrelid=objid
JOIN pg_catalog.pg_class ON logicalrelid=oid
WHERE
refobjid=(select oid FROM pg_extension WHERE extname = 'citus')
AND relnamespace='upgrade_basic'::regnamespace
ORDER BY logicalrelid;
logicalrelid
--------------
t
tp
t_ab
r
tr
t_append
(6 rows)
SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4::bit(8)
FROM pg_dist_partition
JOIN pg_trigger ON tgrelid=logicalrelid
JOIN pg_class ON pg_class.oid=logicalrelid
WHERE
relnamespace='upgrade_basic'::regnamespace
AND tgname LIKE 'truncate_trigger_%'
ORDER BY tgrelid::regclass;
tgrelid | tgfoid | tgisinternal | tgenabled | tgtype
----------+------------------------+--------------+-----------+----------
t | citus_truncate_trigger | t | O | 00100000
tp | citus_truncate_trigger | t | O | 00100000
t_ab | citus_truncate_trigger | t | O | 00100000
r | citus_truncate_trigger | t | O | 00100000
tr | citus_truncate_trigger | t | O | 00100000
t_append | citus_truncate_trigger | t | O | 00100000
(6 rows)
SELECT * FROM t ORDER BY a;
a
---
1
2
3
4
5
(5 rows)
SELECT * FROM t WHERE a = 1;
a
---
1
(1 row)
INSERT INTO t SELECT * FROM generate_series(10, 15);
EXPLAIN (COSTS FALSE) SELECT * from t;
QUERY PLAN
---------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 32
Tasks Shown: One of 32
-> Task
Node: host=localhost port=57636 dbname=postgres
-> Seq Scan on t_102008 t
(6 rows)
EXPLAIN (COSTS FALSE) SELECT * from t WHERE a = 1;
QUERY PLAN
---------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=postgres
-> Bitmap Heap Scan on t_102009 t
Recheck Cond: (a = 1)
-> Bitmap Index Scan on t_a_idx_102009
Index Cond: (a = 1)
(9 rows)
SELECT * FROM t WHERE a = 10;
a
----
10
(1 row)
SELECT * FROM t WHERE a = 11;
a
----
11
(1 row)
COPY t FROM PROGRAM 'echo 20 && echo 21 && echo 22 && echo 23 && echo 24' WITH CSV;
ALTER TABLE t ADD COLUMN b int DEFAULT 10;
SELECT * FROM t ORDER BY a;
a | b
----+----
1 | 10
2 | 10
3 | 10
4 | 10
5 | 10
10 | 10
11 | 10
12 | 10
13 | 10
14 | 10
15 | 10
20 | 10
21 | 10
22 | 10
23 | 10
24 | 10
(16 rows)
TRUNCATE TABLE t;
SELECT * FROM T;
a | b
---+---
(0 rows)
DROP TABLE t;
\d t
-- verify that the table whose column is dropped before a pg_upgrade still works as expected.
SELECT * FROM t_ab ORDER BY b;
b
----
11
22
33
(3 rows)
SELECT * FROM t_ab WHERE b = 11;
b
----
11
(1 row)
SELECT * FROM t_ab WHERE b = 22;
b
----
22
(1 row)
-- Check that we can create a distributed table out of a table that was created
-- before the upgrade
SELECT * FROM t2 ORDER BY a;
a | b
---+----
1 | 11
2 | 22
3 | 33
(3 rows)
SELECT create_distributed_table('t2', 'a');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
SELECT * FROM t2 ORDER BY a;
a | b
---+----
1 | 11
2 | 22
3 | 33
(3 rows)
ROLLBACK;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT * FROM r ORDER BY a;
a
---
1
2
3
4
5
(5 rows)
SELECT * FROM tr ORDER BY pk;
pk | a
----+---
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
DELETE FROM r where a = 1;
SELECT * FROM r ORDER BY a;
a
---
2
3
4
5
(4 rows)
SELECT * FROM tr ORDER BY pk;
pk | a
----+---
2 | 2
3 | 3
4 | 4
5 | 5
(4 rows)
UPDATE r SET a = 30 WHERE a = 3;
SELECT * FROM r ORDER BY a;
a
----
2
4
5
30
(4 rows)
SELECT * FROM tr ORDER BY pk;
pk | a
----+----
2 | 2
3 | 30
4 | 4
5 | 5
(4 rows)
-- Check we can still create distributed tables after upgrade
CREATE TABLE t3(a int, b int);
SELECT create_distributed_table('t3', 'a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t3 VALUES (1, 11);
INSERT INTO t3 VALUES (2, 22);
INSERT INTO t3 VALUES (3, 33);
SELECT * FROM t3 ORDER BY a;
a | b
---+----
1 | 11
2 | 22
3 | 33
(3 rows)
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
WHERE logicalrelid = 't_append'::regclass
ORDER BY shardminvalue, shardmaxvalue;
shardminvalue | shardmaxvalue
---------------+---------------
1 | 3
5 | 7
(2 rows)
SELECT * FROM t_append ORDER BY id;
id | value_1
----+---------
1 | 2
2 | 3
3 | 4
5 | 2
6 | 3
7 | 4
(6 rows)
\copy t_append FROM STDIN DELIMITER ','
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
WHERE logicalrelid = 't_append'::regclass
ORDER BY shardminvalue, shardmaxvalue;
shardminvalue | shardmaxvalue
---------------+---------------
1 | 3
5 | 7
9 | 11
(3 rows)
SELECT * FROM t_append ORDER BY id;
id | value_1
----+---------
1 | 2
2 | 3
3 | 4
5 | 2
6 | 3
7 | 4
9 | 2
10 | 3
11 | 4
(9 rows)
ROLLBACK;

View File

@ -0,0 +1,63 @@
CREATE SCHEMA upgrade_basic;
SET search_path TO upgrade_basic, public;
CREATE TABLE t(a int);
CREATE INDEX ON t USING HASH (a);
SELECT create_distributed_table('t', 'a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t SELECT * FROM generate_series(1, 5);
CREATE TABLE tp(a int PRIMARY KEY);
SELECT create_distributed_table('tp', 'a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO tp SELECT * FROM generate_series(1, 5);
-- We store the index of distribution column and here we check that the distribution
-- column index does not change after an upgrade if we drop a column that comes before the
-- distribution column. The index information is in partkey column of pg_dist_partition table.
CREATE TABLE t_ab(a int, b int);
SELECT create_distributed_table('t_ab', 'b');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t_ab VALUES (1, 11);
INSERT INTO t_ab VALUES (2, 22);
INSERT INTO t_ab VALUES (3, 33);
CREATE TABLE t2(a int, b int);
INSERT INTO t2 VALUES (1, 11);
INSERT INTO t2 VALUES (2, 22);
INSERT INTO t2 VALUES (3, 33);
ALTER TABLE t_ab DROP a;
-- Check that basic reference tables work
CREATE TABLE r(a int PRIMARY KEY);
SELECT create_reference_table('r');
create_reference_table
------------------------
(1 row)
INSERT INTO r SELECT * FROM generate_series(1, 5);
CREATE TABLE tr(pk int, a int REFERENCES r(a) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_distributed_table('tr', 'pk');
create_distributed_table
--------------------------
(1 row)
INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c;
CREATE TABLE t_append(id int, value_1 int);
SELECT master_create_distributed_table('t_append', 'id', 'append');
master_create_distributed_table
---------------------------------
(1 row)
\copy t_append FROM STDIN DELIMITER ','
\copy t_append FROM STDIN DELIMITER ','

View File

@ -1,73 +0,0 @@
SET search_path TO upgrade_distributed_table_before, public;
SELECT * FROM t ORDER BY a;
a
---
1
2
3
4
5
(5 rows)
SELECT * FROM t WHERE a = 1;
a
---
1
(1 row)
INSERT INTO t SELECT * FROM generate_series(10, 15);
SELECT * FROM t WHERE a = 10;
a
----
10
(1 row)
SELECT * FROM t WHERE a = 11;
a
----
11
(1 row)
-- test distributed type
INSERT INTO t1 VALUES (1, (2,3)::tc1);
SELECT * FROM t1;
a | b
---+-------
1 | (2,3)
(1 row)
ALTER TYPE tc1 RENAME TO tc1_newname;
INSERT INTO t1 VALUES (3, (4,5)::tc1_newname);
TRUNCATE TABLE t;
SELECT * FROM T;
a
---
(0 rows)
-- verify that the table whose column is dropped before a pg_upgrade still works as expected.
SELECT * FROM t_ab ORDER BY b;
b
----
11
22
33
(3 rows)
SELECT * FROM t_ab WHERE b = 11;
b
----
11
(1 row)
SELECT * FROM t_ab WHERE b = 22;
b
----
22
(1 row)
DROP TABLE t;
DROP SCHEMA upgrade_distributed_table_before CASCADE;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to type tc1_newname
drop cascades to table t1
drop cascades to table t_ab

View File

@ -1,32 +0,0 @@
CREATE SCHEMA upgrade_distributed_table_before;
SET search_path TO upgrade_distributed_table_before, public;
CREATE TABLE t(a int);
SELECT create_distributed_table('t', 'a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t SELECT * FROM generate_series(1, 5);
CREATE TYPE tc1 AS (a int, b int);
CREATE TABLE t1 (a int PRIMARY KEY, b tc1);
SELECT create_distributed_table('t1','a');
create_distributed_table
--------------------------
(1 row)
-- We store the index of distribution column and here we check that the distribution
-- column index does not change after an upgrade if we drop a column that comes before the
-- distribution column. The index information is in partkey column of pg_dist_partition table.
CREATE TABLE t_ab(a int, b int);
SELECT create_distributed_table('t_ab', 'b');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t_ab VALUES (1, 11);
INSERT INTO t_ab VALUES (2, 22);
INSERT INTO t_ab VALUES (3, 33);
ALTER TABLE t_ab DROP a;

View File

@ -0,0 +1,122 @@
SET search_path TO upgrade_ref2ref, public;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT * FROM ref_table_1 ORDER BY id;
id | value
----+-------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
SELECT * FROM ref_table_2 ORDER BY id;
id | value
----+-------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
SELECT * FROM ref_table_3 ORDER BY id;
id | value
----+-------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
SELECT * FROM dist_table ORDER BY id;
id | value
----+-------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
UPDATE ref_table_1 SET id = 10 where id = 1;
SELECT * FROM ref_table_1 ORDER BY id;
id | value
----+-------
2 | 2
3 | 3
4 | 4
5 | 5
10 | 1
(5 rows)
SELECT * FROM ref_table_2 ORDER BY id;
id | value
----+-------
1 | 10
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
SELECT * FROM ref_table_3 ORDER BY id;
id | value
----+-------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
SELECT * FROM dist_table ORDER BY id;
id | value
----+-------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
DELETE FROM ref_table_1 WHERE id = 4;
SELECT * FROM ref_table_1 ORDER BY id;
id | value
----+-------
2 | 2
3 | 3
5 | 5
10 | 1
(4 rows)
SELECT * FROM ref_table_2 ORDER BY id;
id | value
----+-------
1 | 10
2 | 2
3 | 3
5 | 5
(4 rows)
SELECT * FROM ref_table_3 ORDER BY id;
id | value
----+-------
1 | 1
2 | 2
3 | 3
5 | 5
(4 rows)
SELECT * FROM dist_table ORDER BY id;
id | value
----+-------
1 | 1
2 | 2
3 | 3
5 | 5
(4 rows)
ROLLBACK;

View File

@ -0,0 +1,34 @@
CREATE SCHEMA upgrade_ref2ref;
SET search_path TO upgrade_ref2ref, public;
CREATE TABLE ref_table_1(id int PRIMARY KEY, value int);
SELECT create_reference_table('ref_table_1');
create_reference_table
------------------------
(1 row)
CREATE TABLE ref_table_2(id int PRIMARY KEY, value int REFERENCES ref_table_1(id) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_reference_table('ref_table_2');
create_reference_table
------------------------
(1 row)
CREATE TABLE ref_table_3(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_reference_table('ref_table_3');
create_reference_table
------------------------
(1 row)
CREATE TABLE dist_table(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_distributed_table('dist_table', 'id');
create_distributed_table
--------------------------
(1 row)
INSERT INTO ref_table_1 SELECT c, c FROM generate_series(1, 5) as c;
INSERT INTO ref_table_2 SELECT * FROM ref_table_1;
INSERT INTO ref_table_3 SELECT * FROM ref_table_2;
INSERT INTO dist_table SELECT * FROM ref_table_3;

View File

@ -0,0 +1,15 @@
SET search_path TO upgrade_type, public;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
-- test distributed type
INSERT INTO tt VALUES (1, (2,3)::type1);
SELECT * FROM tt;
a | b
---+-------
1 | (2,3)
2 | (3,4)
(2 rows)
ALTER TYPE type1 RENAME TO type1_newname;
INSERT INTO tt VALUES (3, (4,5)::type1_newname);
ROLLBACK;

View File

@ -0,0 +1,11 @@
CREATE SCHEMA upgrade_type;
SET search_path TO upgrade_type, public;
CREATE TYPE type1 AS (a int, b int);
CREATE TABLE tt (a int PRIMARY KEY, b type1);
SELECT create_distributed_table('tt','a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO tt VALUES (2, (3,4)::type1);

View File

@ -3,7 +3,6 @@
-- this file is intended to create the table requires for the tests
--
SET citus.next_shard_id TO 1400000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000;
SET citus.shard_replication_factor = 1;
SET citus.shard_count = 32;

View File

@ -3,7 +3,6 @@
-- this file is intended to create the table requires for the tests
--
SET citus.next_shard_id TO 1400000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000;
SET citus.shard_replication_factor = 1;
SET citus.shard_count = 32;
CREATE SCHEMA with_basics;

View File

@ -1,17 +0,0 @@
SET search_path TO before_citus_upgrade_coord, public;
SELECT * FROM t ORDER BY a;
SELECT * FROM t WHERE a = 1;
INSERT INTO t SELECT * FROM generate_series(10, 15);
SELECT * FROM t WHERE a = 10;
SELECT * FROM t WHERE a = 11;
TRUNCATE TABLE t;
SELECT * FROM T;
DROP TABLE t;
DROP SCHEMA before_citus_upgrade_coord CASCADE;

View File

@ -1,6 +0,0 @@
CREATE SCHEMA before_citus_upgrade_coord;
SET search_path TO before_citus_upgrade_coord, public;
CREATE TABLE t(a int);
SELECT create_distributed_table('t', 'a');
INSERT INTO t SELECT * FROM generate_series(1, 5);

View File

@ -8,7 +8,6 @@
SET citus.next_shard_id TO 580000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 580000;
CREATE SCHEMA test;

View File

@ -9,7 +9,6 @@ SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
SET citus.next_shard_id TO 1420000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1420000;
SET citus.shard_replication_factor TO 1;

View File

@ -6,7 +6,6 @@
-- citus_total_relation_size are also tested.
SET citus.next_shard_id TO 1460000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1460000;
SET citus.shard_replication_factor to 1;

View File

@ -3,7 +3,6 @@
-- this file is intended to test multi shard update/delete queries
--
SET citus.next_shard_id TO 1440000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1440000;
SET citus.shard_replication_factor to 1;
SET citus.multi_shard_modify_mode to 'parallel';

View File

@ -6,7 +6,6 @@
-- citus_total_relation_size are also tested.
SET citus.next_shard_id TO 1390000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1390000;
-- Tests with invalid relation IDs
SELECT citus_table_size(1);

View File

@ -6,7 +6,6 @@
-- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests
-- SET citus.next_shard_id TO 1400000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000;
--
-- UNIONs and JOINs mixed

View File

@ -6,7 +6,6 @@
-- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests
-- SET citus.next_shard_id TO 1400000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000;
CREATE TABLE user_buy_test_table(user_id int, item_id int, buy_count int);
SELECT create_distributed_table('user_buy_test_table', 'user_id');

View File

@ -2,7 +2,6 @@
-- MULTI_TASK_STRING_SIZE
--
SET citus.next_shard_id TO 1602000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1602000;
CREATE TABLE wide_table
(

View File

@ -0,0 +1,120 @@
SET search_path TO upgrade_basic, public, pg_catalog;
BEGIN;
SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' ORDER BY tablename;
SELECT nextval('pg_dist_shardid_seq') = MAX(shardid)+1 FROM pg_dist_shard;
SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg_dist_placement;
SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node;
SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node;
SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation;
-- If this query gives output it means we've added a new sequence that should
-- possibly be restored after upgrades.
SELECT sequence_name FROM information_schema.sequences
WHERE sequence_name LIKE 'pg_dist_%'
AND sequence_name NOT IN (
-- these ones are restored above
'pg_dist_shardid_seq',
'pg_dist_placement_placementid_seq',
'pg_dist_groupid_seq',
'pg_dist_node_nodeid_seq',
'pg_dist_colocationid_seq'
);
SELECT logicalrelid FROM pg_dist_partition
JOIN pg_depend ON logicalrelid=objid
JOIN pg_catalog.pg_class ON logicalrelid=oid
WHERE
refobjid=(select oid FROM pg_extension WHERE extname = 'citus')
AND relnamespace='upgrade_basic'::regnamespace
ORDER BY logicalrelid;
SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4::bit(8)
FROM pg_dist_partition
JOIN pg_trigger ON tgrelid=logicalrelid
JOIN pg_class ON pg_class.oid=logicalrelid
WHERE
relnamespace='upgrade_basic'::regnamespace
AND tgname LIKE 'truncate_trigger_%'
ORDER BY tgrelid::regclass;
SELECT * FROM t ORDER BY a;
SELECT * FROM t WHERE a = 1;
INSERT INTO t SELECT * FROM generate_series(10, 15);
EXPLAIN (COSTS FALSE) SELECT * from t;
EXPLAIN (COSTS FALSE) SELECT * from t WHERE a = 1;
SELECT * FROM t WHERE a = 10;
SELECT * FROM t WHERE a = 11;
COPY t FROM PROGRAM 'echo 20 && echo 21 && echo 22 && echo 23 && echo 24' WITH CSV;
ALTER TABLE t ADD COLUMN b int DEFAULT 10;
SELECT * FROM t ORDER BY a;
TRUNCATE TABLE t;
SELECT * FROM T;
DROP TABLE t;
\d t
-- verify that the table whose column is dropped before a pg_upgrade still works as expected.
SELECT * FROM t_ab ORDER BY b;
SELECT * FROM t_ab WHERE b = 11;
SELECT * FROM t_ab WHERE b = 22;
-- Check that we can create a distributed table out of a table that was created
-- before the upgrade
SELECT * FROM t2 ORDER BY a;
SELECT create_distributed_table('t2', 'a');
SELECT * FROM t2 ORDER BY a;
ROLLBACK;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT * FROM r ORDER BY a;
SELECT * FROM tr ORDER BY pk;
DELETE FROM r where a = 1;
SELECT * FROM r ORDER BY a;
SELECT * FROM tr ORDER BY pk;
UPDATE r SET a = 30 WHERE a = 3;
SELECT * FROM r ORDER BY a;
SELECT * FROM tr ORDER BY pk;
-- Check we can still create distributed tables after upgrade
CREATE TABLE t3(a int, b int);
SELECT create_distributed_table('t3', 'a');
INSERT INTO t3 VALUES (1, 11);
INSERT INTO t3 VALUES (2, 22);
INSERT INTO t3 VALUES (3, 33);
SELECT * FROM t3 ORDER BY a;
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
WHERE logicalrelid = 't_append'::regclass
ORDER BY shardminvalue, shardmaxvalue;
SELECT * FROM t_append ORDER BY id;
\copy t_append FROM STDIN DELIMITER ','
9,2
10,3
11,4
\.
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
WHERE logicalrelid = 't_append'::regclass
ORDER BY shardminvalue, shardmaxvalue;
SELECT * FROM t_append ORDER BY id;
ROLLBACK;

View File

@ -0,0 +1,50 @@
CREATE SCHEMA upgrade_basic;
SET search_path TO upgrade_basic, public;
CREATE TABLE t(a int);
CREATE INDEX ON t USING HASH (a);
SELECT create_distributed_table('t', 'a');
INSERT INTO t SELECT * FROM generate_series(1, 5);
CREATE TABLE tp(a int PRIMARY KEY);
SELECT create_distributed_table('tp', 'a');
INSERT INTO tp SELECT * FROM generate_series(1, 5);
-- We store the index of distribution column and here we check that the distribution
-- column index does not change after an upgrade if we drop a column that comes before the
-- distribution column. The index information is in partkey column of pg_dist_partition table.
CREATE TABLE t_ab(a int, b int);
SELECT create_distributed_table('t_ab', 'b');
INSERT INTO t_ab VALUES (1, 11);
INSERT INTO t_ab VALUES (2, 22);
INSERT INTO t_ab VALUES (3, 33);
CREATE TABLE t2(a int, b int);
INSERT INTO t2 VALUES (1, 11);
INSERT INTO t2 VALUES (2, 22);
INSERT INTO t2 VALUES (3, 33);
ALTER TABLE t_ab DROP a;
-- Check that basic reference tables work
CREATE TABLE r(a int PRIMARY KEY);
SELECT create_reference_table('r');
INSERT INTO r SELECT * FROM generate_series(1, 5);
CREATE TABLE tr(pk int, a int REFERENCES r(a) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_distributed_table('tr', 'pk');
INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c;
CREATE TABLE t_append(id int, value_1 int);
SELECT master_create_distributed_table('t_append', 'id', 'append');
\copy t_append FROM STDIN DELIMITER ','
1,2
2,3
3,4
\.
\copy t_append FROM STDIN DELIMITER ','
5,2
6,3
7,4
\.

View File

@ -1,28 +0,0 @@
SET search_path TO upgrade_distributed_table_before, public;
SELECT * FROM t ORDER BY a;
SELECT * FROM t WHERE a = 1;
INSERT INTO t SELECT * FROM generate_series(10, 15);
SELECT * FROM t WHERE a = 10;
SELECT * FROM t WHERE a = 11;
-- test distributed type
INSERT INTO t1 VALUES (1, (2,3)::tc1);
SELECT * FROM t1;
ALTER TYPE tc1 RENAME TO tc1_newname;
INSERT INTO t1 VALUES (3, (4,5)::tc1_newname);
TRUNCATE TABLE t;
SELECT * FROM T;
-- verify that the table whose column is dropped before a pg_upgrade still works as expected.
SELECT * FROM t_ab ORDER BY b;
SELECT * FROM t_ab WHERE b = 11;
SELECT * FROM t_ab WHERE b = 22;
DROP TABLE t;
DROP SCHEMA upgrade_distributed_table_before CASCADE;

View File

@ -1,22 +0,0 @@
CREATE SCHEMA upgrade_distributed_table_before;
SET search_path TO upgrade_distributed_table_before, public;
CREATE TABLE t(a int);
SELECT create_distributed_table('t', 'a');
INSERT INTO t SELECT * FROM generate_series(1, 5);
CREATE TYPE tc1 AS (a int, b int);
CREATE TABLE t1 (a int PRIMARY KEY, b tc1);
SELECT create_distributed_table('t1','a');
-- We store the index of distribution column and here we check that the distribution
-- column index does not change after an upgrade if we drop a column that comes before the
-- distribution column. The index information is in partkey column of pg_dist_partition table.
CREATE TABLE t_ab(a int, b int);
SELECT create_distributed_table('t_ab', 'b');
INSERT INTO t_ab VALUES (1, 11);
INSERT INTO t_ab VALUES (2, 22);
INSERT INTO t_ab VALUES (3, 33);
ALTER TABLE t_ab DROP a;

View File

@ -0,0 +1,24 @@
SET search_path TO upgrade_ref2ref, public;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT * FROM ref_table_1 ORDER BY id;
SELECT * FROM ref_table_2 ORDER BY id;
SELECT * FROM ref_table_3 ORDER BY id;
SELECT * FROM dist_table ORDER BY id;
UPDATE ref_table_1 SET id = 10 where id = 1;
SELECT * FROM ref_table_1 ORDER BY id;
SELECT * FROM ref_table_2 ORDER BY id;
SELECT * FROM ref_table_3 ORDER BY id;
SELECT * FROM dist_table ORDER BY id;
DELETE FROM ref_table_1 WHERE id = 4;
SELECT * FROM ref_table_1 ORDER BY id;
SELECT * FROM ref_table_2 ORDER BY id;
SELECT * FROM ref_table_3 ORDER BY id;
SELECT * FROM dist_table ORDER BY id;
ROLLBACK;

View File

@ -0,0 +1,18 @@
CREATE SCHEMA upgrade_ref2ref;
SET search_path TO upgrade_ref2ref, public;
CREATE TABLE ref_table_1(id int PRIMARY KEY, value int);
SELECT create_reference_table('ref_table_1');
CREATE TABLE ref_table_2(id int PRIMARY KEY, value int REFERENCES ref_table_1(id) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_reference_table('ref_table_2');
CREATE TABLE ref_table_3(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_reference_table('ref_table_3');
CREATE TABLE dist_table(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_distributed_table('dist_table', 'id');
INSERT INTO ref_table_1 SELECT c, c FROM generate_series(1, 5) as c;
INSERT INTO ref_table_2 SELECT * FROM ref_table_1;
INSERT INTO ref_table_3 SELECT * FROM ref_table_2;
INSERT INTO dist_table SELECT * FROM ref_table_3;

View File

@ -0,0 +1,11 @@
SET search_path TO upgrade_type, public;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
-- test distributed type
INSERT INTO tt VALUES (1, (2,3)::type1);
SELECT * FROM tt;
ALTER TYPE type1 RENAME TO type1_newname;
INSERT INTO tt VALUES (3, (4,5)::type1_newname);
ROLLBACK;

View File

@ -0,0 +1,6 @@
CREATE SCHEMA upgrade_type;
SET search_path TO upgrade_type, public;
CREATE TYPE type1 AS (a int, b int);
CREATE TABLE tt (a int PRIMARY KEY, b type1);
SELECT create_distributed_table('tt','a');
INSERT INTO tt VALUES (2, (3,4)::type1);

View File

@ -42,7 +42,10 @@ How the postgres upgrade test works:
- Temporary folder `tmp_upgrade` is created in `src/test/regress/`, if one exists it is removed first.
- Database is initialized and citus cluster is created(1 coordinator + 2 workers) with old postgres.
- `before_pg_upgrade_schedule` is run with `pg_regress`. This schedule does not drop any tables or data so that we can verify upgrade.
- `before_pg_upgrade_schedule` is run with `pg_regress`. This schedule sets up any
objects and data that will be tested for preservation after the upgrade. It
- `after_pg_upgrade_schedule` is run with `pg_regress` to verify that the output
of those tests is the same before the upgrade as after.
- `citus_prepare_pg_upgrade` is run in coordinators and workers.
- Old database is stopped.
- A new database is initialized with new postgres under `tmp_upgrade`.
@ -51,6 +54,20 @@ How the postgres upgrade test works:
- `citus_finish_pg_upgrade` is run in coordinators and workers to finalize the upgrade step.
- `after_pg_upgrade_schedule` is run with `pg_regress` to verify that the previously created tables, and data still exist. Router and realtime queries are used to verify this.
### Writing new PG upgrade tests
The main important thing is that we have `upgrade_{name}_before` and
`upgrade_{name}_after` tests. The `before` files are used to setup any objects
and data before the upgrade. The `after` tests shouldn't have any side effects
since they are run twice (once before and once after the upgrade).
Furthermore, anything that is basic Citus functionality should go in the
`upgrade_basic_before`/`upgrade_basic_after` tests. This test file is used
during PG upgrades and Citus upgrades. Any features that don't work in old Citus
versions should thus be added to their own file, because that file will then
only be run during PG upgrades.
## Citus Upgrade Test
Citus upgrade test is used for testing citus version upgrades from specific version to master. The purpose of this test is to ensure that a newly made change does not result in unexpected upgrade errors.

View File

@ -60,9 +60,11 @@ def stop_all_databases(old_bindir, new_bindir, old_datadir, new_datadir):
def main(config):
common.initialize_temp_dir(config.temp_dir)
common.initialize_citus_cluster(config.old_bindir, config.old_datadir, config.settings)
common.initialize_citus_cluster(config.old_bindir, config.old_datadir, config.settings)
common.run_pg_regress(config.old_bindir, config.pg_srcdir,
NODE_PORTS[COORDINATOR_NAME], BEFORE_PG_UPGRADE_SCHEDULE)
common.run_pg_regress(config.old_bindir, config.pg_srcdir,
NODE_PORTS[COORDINATOR_NAME], AFTER_PG_UPGRADE_SCHEDULE)
citus_prepare_pg_upgrade(config.old_bindir)
common.stop_databases(config.old_bindir, config.old_datadir)